diff --git a/.github/workflows/clang-format-check.yml b/.github/workflows/clang-format-check.yml index 8fd5b5fb8f7..f9651d16ea5 100644 --- a/.github/workflows/clang-format-check.yml +++ b/.github/workflows/clang-format-check.yml @@ -18,6 +18,6 @@ jobs: with: source: '.' extensions: 'c,h,cpp,hpp,java' - clangFormatVersion: 13 + clangFormatVersion: 17 style: file exclude: './config ./hl/src/H5LTanalyze.c ./hl/src/H5LTparse.c ./hl/src/H5LTparse.h ./src/H5Epubgen.h ./src/H5Einit.h ./src/H5Eterm.h ./src/H5Edefin.h ./src/H5version.h ./src/H5overflow.h' diff --git a/.github/workflows/clang-format-fix.yml b/.github/workflows/clang-format-fix.yml index a99d32d38ad..fb7e6957293 100644 --- a/.github/workflows/clang-format-fix.yml +++ b/.github/workflows/clang-format-fix.yml @@ -30,7 +30,7 @@ jobs: with: source: '.' extensions: 'c,h,cpp,hpp,java' - clangFormatVersion: 13 + clangFormatVersion: 17 inplace: True style: file exclude: './config ./hl/src/H5LTanalyze.c ./hl/src/H5LTparse.c ./hl/src/H5LTparse.h ./src/H5Epubgen.h ./src/H5Einit.h ./src/H5Eterm.h ./src/H5Edefin.h ./src/H5version.h ./src/H5overflow.h' diff --git a/.github/workflows/cmake-bintest.yml b/.github/workflows/cmake-bintest.yml index 379db3a2ca5..3952cf80fa8 100644 --- a/.github/workflows/cmake-bintest.yml +++ b/.github/workflows/cmake-bintest.yml @@ -201,6 +201,6 @@ jobs: HDF5_PLUGIN_PATH: ${{ steps.set-hdf5lib-name.outputs.HDF5_PLUGIN_PATH }} run: | cd "${{ steps.set-hdf5lib-name.outputs.HDF5_ROOT }}/share/HDF5Examples" - cmake --workflow --preset=ci-StdShar-MACOS-Clang --fresh + cmake --workflow --preset=ci-StdShar-macos-Clang --fresh shell: bash diff --git a/.github/workflows/cmake-script.yml b/.github/workflows/cmake-script.yml new file mode 100644 index 00000000000..35541279595 --- /dev/null +++ b/.github/workflows/cmake-script.yml @@ -0,0 +1,801 @@ +name: hdf5 dev ctest script runs + +# Triggers the workflow on a call from another workflow +on: + workflow_call: + inputs: + snap_name: + description: 'The name in the source tarballs' + type: string + required: false + default: hdfsrc + file_base: + description: "The common base name of the source tarballs" + required: true + type: string + use_environ: + description: 'Environment to locate files' + type: string + required: true + default: snapshots + +permissions: + contents: read + +jobs: + build_and_test_win: + # Windows w/ MSVC + CMake + # + name: "Windows MSVC CTest" + runs-on: windows-latest + steps: + - name: Install Dependencies (Windows) + run: choco install ninja + + - name: Install Dependencies + uses: ssciwr/doxygen-install@v1 + with: + version: "1.9.7" + + - name: Enable Developer Command Prompt + uses: ilammy/msvc-dev-cmd@v1.13.0 + + - name: Set file base name (Windows) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + shell: bash + + # Get files created by release script + - name: Get zip-tarball (Windows) + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: zip-tarball + path: ${{ github.workspace }} + + - name: using powershell + shell: pwsh + run: Get-Location + + - name: List files for the space (Windows) + run: | + Get-ChildItem -Path ${{ github.workspace }} + Get-ChildItem -Path ${{ runner.workspace }} + shell: pwsh + + 
- name: Uncompress source (Windows) + working-directory: ${{ github.workspace }} + run: 7z x ${{ steps.set-file-base.outputs.FILE_BASE }}.zip + shell: bash + + - name: Copy script files for the space (Windows) + run: | + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/CTestScript.cmake -Destination ${{ runner.workspace }}/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/HDF5config.cmake -Destination ${{ runner.workspace }}/hdf5/ + shell: pwsh + + - name: List files for the hdf5 (Windows) + run: | + Get-ChildItem -Path ${{ runner.workspace }}/hdf5 + shell: pwsh + + - name: Create options file (Windows) + uses: "DamianReeves/write-file-action@master" + with: + path: ${{ runner.workspace }}/hdf5/HDF5options.cmake + write-mode: overwrite + contents: | + set (CTEST_DROP_SITE_INIT "my.cdash.org") + # Change following line to submit to your CDash dashboard to a different CDash project + #set (CTEST_DROP_LOCATION_INIT "/submit.php?project=HDF5") + set (SITE_BUILDNAME_SUFFIX "${{ steps.set-file-base.outputs.FILE_BASE }}") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} --log-level=VERBOSE") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_CPP_LIB:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_PLUGIN_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DLIBAEC_USE_LOCALCONTENT:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_USE_LOCALCONTENT:BOOL=OFF") + + - name: Run ctest script (Windows) + run: | + cd "${{ runner.workspace }}/hdf5" + ctest -S HDF5config.cmake,CTEST_SITE_EXT=GH,LOCAL_SUBMIT=ON,NINJA=TRUE,BUILD_GENERATOR=VS202264,CTEST_SOURCE_NAME=${{ steps.set-file-base.outputs.SOURCE_BASE }} -C Release -VV -O hdf5.log + shell: bash + continue-on-error: true + + # Save log files created by ctest script + - name: Save log (Windows) + uses: actions/upload-artifact@v4 + with: + name: cl-win-log + path: ${{ runner.workspace }}/hdf5/hdf5.log + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + + build_and_test_linux: + # Linux (Ubuntu) w/ gcc + CMake + # + name: "Ubuntu gcc CMake" + runs-on: ubuntu-latest + steps: + - name: Install CMake Dependencies (Linux) + run: | + sudo apt-get update + sudo apt-get install ninja-build graphviz curl + + - name: Install Dependencies + uses: ssciwr/doxygen-install@v1 + with: + version: "1.9.7" + + - name: Set file base name (Linux) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + + # Get files created by release script + - name: Get tgz-tarball (Linux) + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: tgz-tarball + path: ${{ github.workspace }} + + - name: List files for the space 
(Linux) + run: | + ls -l ${{ github.workspace }} + ls ${{ runner.workspace }} + + - name: Uncompress source (Linux) + run: tar -zxvf ${{ github.workspace }}/${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz + + - name: Copy script files for the space (Linux) + run: | + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/CTestScript.cmake ${{ runner.workspace }}/hdf5 + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/HDF5config.cmake ${{ runner.workspace }}/hdf5 + + - name: List files for the hdf5 (Linux) + run: | + ls ${{ runner.workspace }}/hdf5 + + - name: Create options file (Linux) + uses: "DamianReeves/write-file-action@master" + with: + path: ${{ runner.workspace }}/hdf5/HDF5options.cmake + write-mode: overwrite + contents: | + set (CTEST_DROP_SITE_INIT "my.cdash.org") + # Change following line to submit to your CDash dashboard to a different CDash project + #set (CTEST_DROP_LOCATION_INIT "/submit.php?project=HDF5") + set (SITE_BUILDNAME_SUFFIX "${{ steps.set-file-base.outputs.FILE_BASE }}") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} --log-level=VERBOSE") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_CPP_LIB:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_PLUGIN_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DLIBAEC_USE_LOCALCONTENT:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_USE_LOCALCONTENT:BOOL=OFF") + + - name: Run ctest (Linux) + run: | + cd "${{ runner.workspace }}/hdf5" + ctest -S HDF5config.cmake,CTEST_SITE_EXT=GH-GCC,LOCAL_SUBMIT=ON,NINJA=TRUE,BUILD_GENERATOR=Unix,CTEST_SOURCE_NAME=${{ steps.set-file-base.outputs.SOURCE_BASE }} -C Release -VV -O hdf5.log + shell: bash + continue-on-error: true + + # Save log files created by ctest script + - name: Save log (Linux) + uses: actions/upload-artifact@v4 + with: + name: gcc-ubuntu-log + path: ${{ runner.workspace }}/hdf5/hdf5.log + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + + build_and_test_mac_latest: + # MacOS w/ Clang + CMake + # + name: "MacOS Clang CMake" + runs-on: macos-latest + steps: + - name: Install Dependencies (MacOS_latest) + run: brew install ninja curl + + - name: Install Dependencies + uses: ssciwr/doxygen-install@v1 + with: + version: "1.9.7" + + - name: Set up JDK 21 + uses: actions/setup-java@v4 + with: + java-version: '21' + distribution: 'temurin' + + - name: Set file base name (MacOS_latest) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + + # Get files created by release script + - name: Get tgz-tarball (MacOS_latest) + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: tgz-tarball + path: ${{ github.workspace }} + + - name: List files for the space 
(MacOS_latest) + run: | + ls ${{ github.workspace }} + ls ${{ runner.workspace }} + + - name: Uncompress source (MacOS_latest) + run: tar -zxvf ${{ github.workspace }}/${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz + + - name: Copy script files for the space (MacOS_latest) + run: | + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/CTestScript.cmake ${{ runner.workspace }}/hdf5 + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/HDF5config.cmake ${{ runner.workspace }}/hdf5 + + # symlinks the compiler executables to a common location + - name: Setup GNU Fortran + uses: fortran-lang/setup-fortran@v1 + id: setup-fortran + with: + compiler: gcc + version: 14 + + - name: List files for the hdf5 (MacOS_latest) + run: | + ls ${{ runner.workspace }}/hdf5 + + - name: Create options file (MacOS_latest) + uses: "DamianReeves/write-file-action@master" + with: + path: ${{ runner.workspace }}/hdf5/HDF5options.cmake + write-mode: overwrite + contents: | + set (CTEST_DROP_SITE_INIT "my.cdash.org") + # Change following line to submit to your CDash dashboard to a different CDash project + #set (CTEST_DROP_LOCATION_INIT "/submit.php?project=HDF5") + set (SITE_BUILDNAME_SUFFIX "${{ steps.set-file-base.outputs.FILE_BASE }}") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} --log-level=VERBOSE") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_CPP_LIB:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_PLUGIN_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DLIBAEC_USE_LOCALCONTENT:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_USE_LOCALCONTENT:BOOL=OFF") + + - name: Run ctest (MacOS_latest) + id: run-ctest + run: | + cd "${{ runner.workspace }}/hdf5" + ctest -S HDF5config.cmake,CTEST_SITE_EXT=GH-Clang,LOCAL_SUBMIT=ON,NINJA=TRUE,BUILD_GENERATOR=Unix,CTEST_SOURCE_NAME=${{ steps.set-file-base.outputs.SOURCE_BASE }} -C Release -VV -O hdf5.log + shell: bash + continue-on-error: true + + # Save log files created by ctest script + - name: Save log (MacOS_latest) + uses: actions/upload-artifact@v4 + with: + name: macos-log + path: ${{ runner.workspace }}/hdf5/hdf5.log + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + + build_and_test_S3_linux: + # Linux S3 (Ubuntu) w/ gcc + CMake + # + name: "Ubuntu gcc CMake S3" + runs-on: ubuntu-latest + steps: + - name: Install CMake Dependencies (Linux S3) + run: | + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz curl + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + + - name: Set file base name (Linux S3) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + + # Get files created by release script + - name: Get tgz-tarball (Linux S3) + uses: 
actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: tgz-tarball + path: ${{ github.workspace }} + + - name: List files for the space (Linux S3) + run: | + ls -l ${{ github.workspace }} + ls ${{ runner.workspace }} + + - name: Uncompress source (Linux S3) + run: tar -zxvf ${{ github.workspace }}/${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz + + - name: Copy script files for the space (Linux S3) + run: | + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/CTestScript.cmake ${{ runner.workspace }}/hdf5 + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/HDF5config.cmake ${{ runner.workspace }}/hdf5 + + - name: List files for the hdf5 (Linux S3) + run: | + ls ${{ runner.workspace }}/hdf5 + + - name: Create options file (Linux S3) + uses: "DamianReeves/write-file-action@master" + with: + path: ${{ runner.workspace }}/hdf5/HDF5options.cmake + write-mode: overwrite + contents: | + set (CTEST_DROP_SITE_INIT "my.cdash.org") + # Change following line to submit to your CDash dashboard to a different CDash project + #set (CTEST_DROP_LOCATION_INIT "/submit.php?project=HDF5") + set (SITE_BUILDNAME_SUFFIX "${{ steps.set-file-base.outputs.FILE_BASE }}") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} --log-level=VERBOSE") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_CPP_LIB:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_PLUGIN_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DLIBAEC_USE_LOCALCONTENT:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_USE_LOCALCONTENT:BOOL=OFF") + + - name: Run ctest (Linux S3) + run: | + cd "${{ runner.workspace }}/hdf5" + ctest -S HDF5config.cmake,CTEST_SITE_EXT=GH-S3,LOCAL_SUBMIT=ON,NINJA=TRUE,BUILD_GENERATOR=Unix,CTEST_SOURCE_NAME=${{ steps.set-file-base.outputs.SOURCE_BASE }} -C Release -VV -O hdf5.log + shell: bash + continue-on-error: true + + # Save log files created by ctest script + - name: Save log (Linux S3) + uses: actions/upload-artifact@v4 + with: + name: s3-ubuntu-log + path: ${{ runner.workspace }}/hdf5/hdf5.log + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + +####### intel builds + build_and_test_win_intel: + # Windows w/ OneAPI + CMake + # + name: "Windows Intel CTest" + runs-on: windows-latest + steps: + - name: Install Dependencies (Windows_intel) + run: choco install ninja + + - name: add oneAPI to env + uses: fortran-lang/setup-fortran@v1 + id: setup-fortran + with: + compiler: intel + version: '2024.1' + + - name: Set file base name (Windows_intel) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + shell: bash + + # Get files created by release script + - name: Get zip-tarball (Windows_intel) + uses: 
actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: zip-tarball + path: ${{ github.workspace }} + + - name: using powershell + shell: pwsh + run: Get-Location + + - name: List files for the space (Windows_intel) + run: | + Get-ChildItem -Path ${{ github.workspace }} + Get-ChildItem -Path ${{ runner.workspace }} + shell: pwsh + + - name: Uncompress source (Windows_intel) + working-directory: ${{ github.workspace }} + run: 7z x ${{ steps.set-file-base.outputs.FILE_BASE }}.zip + shell: bash + + - name: Copy script files for the space (Windows_intel) + run: | + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/CTestScript.cmake -Destination ${{ runner.workspace }}/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/HDF5config.cmake -Destination ${{ runner.workspace }}/hdf5/ + shell: pwsh + + - name: List files for the hdf5 (Windows_intel) + run: | + Get-ChildItem -Path ${{ runner.workspace }}/hdf5 + shell: pwsh + + - name: Create options file (Windows_intel) + uses: "DamianReeves/write-file-action@master" + with: + path: ${{ runner.workspace }}/hdf5/HDF5options.cmake + write-mode: overwrite + contents: | + set (CTEST_DROP_SITE_INIT "my.cdash.org") + # Change following line to submit to your CDash dashboard to a different CDash project + #set (CTEST_DROP_LOCATION_INIT "/submit.php?project=HDF5") + #set (CMAKE_GENERATOR_TOOLSET "Intel C++ Compiler 2024,fortran=ifx") + set (SITE_BUILDNAME_SUFFIX "${{ steps.set-file-base.outputs.FILE_BASE }}") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} --log-level=VERBOSE") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=config/toolchain/intel.cmake") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_CPP_LIB:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_PLUGIN_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DLIBAEC_USE_LOCALCONTENT:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_USE_LOCALCONTENT:BOOL=OFF") + + - name: Run ctest (Windows_intel) with oneapi + env: + FC: ${{ steps.setup-fortran.outputs.fc }} + CC: ${{ steps.setup-fortran.outputs.cc }} + CXX: ${{ steps.setup-fortran.outputs.cxx }} + BINSIGN: ${{ needs.check-secret.outputs.sign-state }} + SIGNTOOLDIR: ${{ github.workspace }}/Microsoft.Windows.SDK.BuildTools/bin/10.0.22621.0/x64 + run: | + cd "${{ runner.workspace }}/hdf5" + ctest -S HDF5config.cmake,CTEST_SITE_EXT=GH-Intel,LOCAL_SUBMIT=ON,NINJA=TRUE,BUILD_GENERATOR=VS202264,CTEST_SOURCE_NAME=${{ steps.set-file-base.outputs.SOURCE_BASE }} -C Release -VV -O hdf5.log + shell: pwsh + continue-on-error: true + + # Save log files created by ctest script + - name: Save log (Windows_intel) + uses: actions/upload-artifact@v4 + with: + name: intel-win-log + path: ${{ runner.workspace }}/hdf5/hdf5.log + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + + build_and_test_linux_intel: + # Linux (Ubuntu) w/ OneAPI + CMake + # + name: "Ubuntu Intel CMake" + 
runs-on: ubuntu-latest + steps: + - name: Install CMake Dependencies (Linux_intel) + run: | + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz curl + + - name: add oneAPI to env + uses: fortran-lang/setup-fortran@v1 + id: setup-fortran + with: + compiler: intel + version: '2024.1' + + - name: Set file base name (Linux_intel) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + + # Get files created by release script + - name: Get tgz-tarball (Linux_intel) + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: tgz-tarball + path: ${{ github.workspace }} + + - name: List files for the space (Linux_intel) + run: | + ls -l ${{ github.workspace }} + ls ${{ runner.workspace }} + + - name: Uncompress source (Linux_intel) + run: tar -zxvf ${{ github.workspace }}/${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz + + - name: Copy script files for the space (Linux_intel) + run: | + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/CTestScript.cmake ${{ runner.workspace }}/hdf5 + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/HDF5config.cmake ${{ runner.workspace }}/hdf5 + + - name: List files for the hdf5 (Linux_intel) + run: | + ls ${{ runner.workspace }}/hdf5 + + - name: Create options file (Linux_intel) + uses: "DamianReeves/write-file-action@master" + with: + path: ${{ runner.workspace }}/hdf5/HDF5options.cmake + write-mode: overwrite + contents: | + set (CTEST_DROP_SITE_INIT "my.cdash.org") + # Change following line to submit to your CDash dashboard to a different CDash project + #set (CTEST_DROP_LOCATION_INIT "/submit.php?project=HDF5") + set (SITE_BUILDNAME_SUFFIX "${{ steps.set-file-base.outputs.FILE_BASE }}") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} --log-level=VERBOSE") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_CPP_LIB:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_PLUGIN_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DLIBAEC_USE_LOCALCONTENT:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_USE_LOCALCONTENT:BOOL=OFF") + + - name: Run ctest (Linux_intel) + env: + FC: ${{ steps.setup-fortran.outputs.fc }} + CC: ${{ steps.setup-fortran.outputs.cc }} + CXX: ${{ steps.setup-fortran.outputs.cxx }} + run: | + cd "${{ runner.workspace }}/hdf5" + ctest -S HDF5config.cmake,CTEST_SITE_EXT=GH-Intel,LOCAL_SUBMIT=ON,NINJA=TRUE,BUILD_GENERATOR=Unix,CTEST_SOURCE_NAME=${{ steps.set-file-base.outputs.SOURCE_BASE }} -C Release -VV -O hdf5.log + shell: bash + continue-on-error: true + + # Save log files created by ctest script + - name: Save log (Linux_intel) + uses: actions/upload-artifact@v4 + with: + name: intel-ubuntu-log + path: ${{ runner.workspace }}/hdf5/hdf5.log + 
if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + +####### clang builds + build_and_test_win_clang: + # Windows w/ clang + CMake + # + name: "Windows ClangCL CTest" + runs-on: windows-latest + steps: + - name: Install Dependencies (Windows_clang) + run: choco install ninja + + - name: add clang to env + uses: KyleMayes/install-llvm-action@v2.0.5 + id: setup-clang + with: + env: true + version: '18' + + - name: Set file base name (Windows_clang) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + shell: bash + + # Get files created by release script + - name: Get zip-tarball (Windows_clang) + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: zip-tarball + path: ${{ github.workspace }} + + - name: using powershell + shell: pwsh + run: Get-Location + + - name: List files for the space (Windows_clang) + run: | + Get-ChildItem -Path ${{ github.workspace }} + Get-ChildItem -Path ${{ runner.workspace }} + shell: pwsh + + - name: Uncompress source (Windows_clang) + working-directory: ${{ github.workspace }} + run: 7z x ${{ steps.set-file-base.outputs.FILE_BASE }}.zip + shell: bash + + - name: Copy script files for the space (Windows_clang) + run: | + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/CTestScript.cmake -Destination ${{ runner.workspace }}/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/HDF5config.cmake -Destination ${{ runner.workspace }}/hdf5/ + shell: pwsh + + - name: List files for the hdf5 (Windows_clang) + run: | + Get-ChildItem -Path ${{ runner.workspace }}/hdf5 + shell: pwsh + + - name: Create options file (Windows_clang) + uses: "DamianReeves/write-file-action@master" + with: + path: ${{ runner.workspace }}/hdf5/HDF5options.cmake + write-mode: overwrite + contents: | + set (CTEST_DROP_SITE_INIT "my.cdash.org") + # Change following line to submit to your CDash dashboard to a different CDash project + #set (CTEST_DROP_LOCATION_INIT "/submit.php?project=HDF5") + set (SITE_BUILDNAME_SUFFIX "${{ steps.set-file-base.outputs.FILE_BASE }}") + #set (CMAKE_GENERATOR_TOOLSET "ClangCL") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} --log-level=VERBOSE") + #set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=config/toolchain/clang.cmake") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_CPP_LIB:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_PLUGIN_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DLIBAEC_USE_LOCALCONTENT:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_USE_LOCALCONTENT:BOOL=OFF") + + - name: Run ctest (Windows_clang) with clang + env: + BINSIGN: ${{ 
needs.check-secret.outputs.sign-state }} + SIGNTOOLDIR: ${{ github.workspace }}/Microsoft.Windows.SDK.BuildTools/bin/10.0.22621.0/x64 + run: | + cd "${{ runner.workspace }}/hdf5" + ctest -S HDF5config.cmake,CTEST_SITE_EXT=GH-Clang,LOCAL_SUBMIT=ON,NINJA=TRUE,BUILD_GENERATOR=VS202264,CTEST_SOURCE_NAME=${{ steps.set-file-base.outputs.SOURCE_BASE }} -C Release -VV -O hdf5.log + shell: pwsh + continue-on-error: true + + # Save log files created by ctest script + - name: Save log (Windows_clang) + uses: actions/upload-artifact@v4 + with: + name: clang-win-log + path: ${{ runner.workspace }}/hdf5/hdf5.log + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + + build_and_test_linux_clang: + # Linux (Ubuntu) w/ clang + CMake + # + name: "Ubuntu Clang CMake" + runs-on: ubuntu-latest + steps: + - name: Install CMake Dependencies (Linux_clang) + run: | + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz curl libncurses5 + + - name: add clang to env + uses: KyleMayes/install-llvm-action@v2.0.5 + id: setup-clang + with: + env: true + version: '18' + + - name: Set file base name (Linux_clang) + id: set-file-base + run: | + FILE_NAME_BASE=$(echo "${{ inputs.file_base }}") + echo "FILE_BASE=$FILE_NAME_BASE" >> $GITHUB_OUTPUT + if [[ '${{ inputs.use_environ }}' == 'release' ]] + then + SOURCE_NAME_BASE=$(echo "${{ inputs.snap_name }}") + else + SOURCE_NAME_BASE=$(echo "hdfsrc") + fi + echo "SOURCE_BASE=$SOURCE_NAME_BASE" >> $GITHUB_OUTPUT + + # Get files created by release script + - name: Get tgz-tarball (Linux_clang) + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: tgz-tarball + path: ${{ github.workspace }} + + - name: List files for the space (Linux_clang) + run: | + ls -l ${{ github.workspace }} + ls ${{ runner.workspace }} + + - name: Uncompress source (Linux_clang) + run: tar -zxvf ${{ github.workspace }}/${{ steps.set-file-base.outputs.FILE_BASE }}.tar.gz + + - name: Copy script files for the space (Linux_clang) + run: | + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/CTestScript.cmake ${{ runner.workspace }}/hdf5 + cp ${{ github.workspace }}/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/scripts/HDF5config.cmake ${{ runner.workspace }}/hdf5 + + - name: List files for the hdf5 (Linux_clang) + run: | + ls ${{ runner.workspace }}/hdf5 + + - name: Create options file (Linux_clang) + uses: "DamianReeves/write-file-action@master" + with: + path: ${{ runner.workspace }}/hdf5/HDF5options.cmake + write-mode: overwrite + contents: | + set (CTEST_DROP_SITE_INIT "my.cdash.org") + # Change following line to submit to your CDash dashboard to a different CDash project + set (SITE_BUILDNAME_SUFFIX "${{ steps.set-file-base.outputs.FILE_BASE }}") + #set (CTEST_DROP_LOCATION_INIT "/submit.php?project=HDF5") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} --log-level=VERBOSE") + #set (CMAKE_GENERATOR_TOOLSET "clang") + #set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=config/toolchain/clang.cmake") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_CPP_LIB:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS 
"${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_PLUGIN_SUPPORT:BOOL=ON") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DLIBAEC_USE_LOCALCONTENT:BOOL=OFF") + set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_USE_LOCALCONTENT:BOOL=OFF") + + - name: Run ctest (Linux_clang) + run: | + cd "${{ runner.workspace }}/hdf5" + ctest -S HDF5config.cmake,CTEST_SITE_EXT=GH-Clang,LOCAL_SUBMIT=ON,NINJA=TRUE,BUILD_GENERATOR=Unix,CTEST_SOURCE_NAME=${{ steps.set-file-base.outputs.SOURCE_BASE }} -C Release -VV -O hdf5.log + shell: bash + continue-on-error: true + + # Save log files created by ctest script + - name: Save log (Linux_clang) + uses: actions/upload-artifact@v4 + with: + name: clang-ubuntu-log + path: ${{ runner.workspace }}/hdf5/hdf5.log + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` diff --git a/.github/workflows/daily-build.yml b/.github/workflows/daily-build.yml index 2b7d6aef3bf..ef98e353ab2 100644 --- a/.github/workflows/daily-build.yml +++ b/.github/workflows/daily-build.yml @@ -3,6 +3,12 @@ name: hdf5 dev daily build # Triggers the workflow on a schedule or on demand on: workflow_dispatch: + inputs: + use_ignore: + description: 'Ignore has_changes check' + type: string + required: false + default: check schedule: - cron: "6 0 * * *" @@ -14,6 +20,7 @@ jobs: runs-on: ubuntu-latest outputs: hdf5-name: ${{ steps.gethdf5base.outputs.HDF5_NAME_BASE }} + run-ignore: ${{ steps.getinputs.outputs.INPUTS_IGNORE }} steps: - uses: actions/checkout@v4.1.7 @@ -23,6 +30,7 @@ jobs: with: version: 'tags/snapshot' file: 'last-file.txt' + continue-on-error: true - name: Read base-name file id: gethdf5base @@ -30,14 +38,31 @@ jobs: - run: echo "hdf5 base name is ${{ steps.gethdf5base.outputs.HDF5_NAME_BASE }}." + - name: Read inputs + id: getinputs + run: | + echo "INPUTS_IGNORE=${{ inputs.use_ignore }}" >> $GITHUB_OUTPUT + + - run: echo "use_ignore is ${{ steps.getinputs.outputs.INPUTS_IGNORE }}." 
+ call-workflow-tarball: + needs: [get-old-names] uses: ./.github/workflows/tarball.yml with: + use_ignore: ${{ needs.get-old-names.outputs.run-ignore }} use_tag: snapshot use_environ: snapshots + call-workflow-c-script: + needs: [get-old-names, call-workflow-tarball] + uses: ./.github/workflows/cmake-script.yml + with: + file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} + use_environ: snapshots + if: ${{ ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-old-names.outputs.run-ignore == 'ignore')) }} + call-workflow-ctest: - needs: call-workflow-tarball + needs: [get-old-names, call-workflow-tarball] uses: ./.github/workflows/cmake-ctest.yml with: preset_name: ci-StdShar @@ -54,17 +79,17 @@ jobs: AZURE_ENDPOINT: ${{ secrets.AZURE_ENDPOINT }} AZURE_CODE_SIGNING_NAME: ${{ secrets.AZURE_CODE_SIGNING_NAME }} AZURE_CERT_PROFILE_NAME: ${{ secrets.AZURE_CERT_PROFILE_NAME }} - if: ${{ needs.call-workflow-tarball.outputs.has_changes == 'true' }} + if: ${{ ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-old-names.outputs.run-ignore == 'ignore')) }} call-workflow-abi: - needs: [call-workflow-tarball, call-workflow-ctest] + needs: [get-old-names, call-workflow-tarball, call-workflow-ctest] uses: ./.github/workflows/abi-report.yml with: - file_ref: '1.14.4.3' + file_ref: '1.14.5' file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} use_tag: snapshot use_environ: snapshots - if: ${{ needs.call-workflow-tarball.outputs.has_changes == 'true' }} + if: ${{ ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-old-names.outputs.run-ignore == 'ignore')) }} call-workflow-release: needs: [get-old-names, call-workflow-tarball, call-workflow-ctest, call-workflow-abi] @@ -77,7 +102,7 @@ jobs: file_sha: ${{ needs.call-workflow-tarball.outputs.file_sha }} use_tag: snapshot use_environ: snapshots - if: ${{ needs.call-workflow-tarball.outputs.has_changes == 'true' }} + if: ${{ ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-old-names.outputs.run-ignore == 'ignore')) }} call-workflow-remove: needs: [get-old-names, call-workflow-tarball, call-workflow-ctest, call-workflow-abi, call-workflow-release] @@ -88,5 +113,5 @@ jobs: file_base: ${{ needs.get-old-names.outputs.hdf5-name }} use_tag: snapshot use_environ: snapshots - if: ${{ (needs.call-workflow-tarball.outputs.has_changes == 'true') && (needs.get-old-names.outputs.hdf5-name != needs.call-workflow-tarball.outputs.file_base) }} + if: ${{ ((needs.call-workflow-tarball.outputs.has_changes == 'true') || (needs.get-old-names.outputs.run-ignore == 'ignore')) && (needs.get-old-names.outputs.hdf5-name != needs.call-workflow-tarball.outputs.file_base) }} diff --git a/.github/workflows/h5py.yml b/.github/workflows/h5py.yml index ddc1bee6156..a0c39d67f59 100644 --- a/.github/workflows/h5py.yml +++ b/.github/workflows/h5py.yml @@ -24,8 +24,8 @@ jobs: sed -i 's/hdf5@1.10.4:1.14/hdf5@1.10.4:/g' \ ./spack/var/spack/repos/builtin/packages/py-h5py/package.py . 
./spack/share/spack/setup-env.sh - ./spack/bin/spack spec py-h5py@master+mpi ^hdf5@develop-1.15 - ./spack/bin/spack install py-h5py@master+mpi ^hdf5@develop-1.15 + ./spack/bin/spack spec py-h5py@master+mpi ^hdf5@develop-1.17 + ./spack/bin/spack install py-h5py@master+mpi ^hdf5@develop-1.17 ./spack/bin/spack install py-pytest ./spack/bin/spack install py-ipython spack load py-h5py diff --git a/.github/workflows/hdfeos5.yml b/.github/workflows/hdfeos5.yml new file mode 100644 index 00000000000..0d5cf969382 --- /dev/null +++ b/.github/workflows/hdfeos5.yml @@ -0,0 +1,51 @@ +name: hdfeos5 dev + +# Triggers the workflow on push or pull request or on demand +on: + workflow_dispatch: + push: + pull_request: + branches: [ develop ] + paths-ignore: + - '.github/CODEOWNERS' + - '.github/FUNDING.yml' + - 'doc/**' + - 'release_docs/**' + - 'ACKNOWLEDGEMENTS' + - 'COPYING**' + - '**.md' + +# Using concurrency to cancel any in-progress job or run +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + build: + name: Build hdfeos5 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4.1.7 + + - name: Install Autotools Dependencies (Linux) + run: | + sudo apt update + sudo apt install automake autoconf libtool libtool-bin + - name: Install HDF5 + run: | + ./autogen.sh + ./configure --prefix=/usr/local --disable-tests --with-default-api-version=v16 + make + sudo make install + - name: Install HDF-EOS5 + run: | + wget -O HDF-EOS5.2.0.tar.gz "https://git.earthdata.nasa.gov/projects/DAS/repos/hdfeos5/raw/hdf-eos5-2.0-src.tar.gz?at=refs%2Fheads%2FHDFEOS5_2.0" + tar zxvf HDF-EOS5.2.0.tar.gz + cd hdf-eos5-2.0 + ./configure CC=/usr/local/bin/h5cc --prefix=/usr/local/ --enable-install-include + make + make check + sudo make install diff --git a/.github/workflows/intel-auto.yml b/.github/workflows/intel-auto.yml index 6e0380efb4e..6f80a79eebc 100644 --- a/.github/workflows/intel-auto.yml +++ b/.github/workflows/intel-auto.yml @@ -1,4 +1,4 @@ -name: hdf5 dev autotools icx +name: hdf5 dev autotools icx CI # Triggers the workflow on a call from another workflow on: diff --git a/.github/workflows/intel-cmake.yml b/.github/workflows/intel-cmake.yml index fb703d480b3..2816fa95a25 100644 --- a/.github/workflows/intel-cmake.yml +++ b/.github/workflows/intel-cmake.yml @@ -1,4 +1,4 @@ -name: hdf5 dev CMake icx +name: hdf5 dev CMake icx CI # Triggers the workflow on a call from another workflow on: diff --git a/.github/workflows/main-cmake-spc.yml b/.github/workflows/main-cmake-spc.yml index df9ccff495c..b83c80beb9f 100644 --- a/.github/workflows/main-cmake-spc.yml +++ b/.github/workflows/main-cmake-spc.yml @@ -77,6 +77,66 @@ jobs: # # - name: CMake Run Tests # run: ctest . 
--parallel 2 -C Debug -V +# working-directory: ${{ runner.workspace }}/build + + build_v1_8: + name: "gcc DBG v1.8 default API" + runs-on: ubuntu-latest + steps: + # SETUP + - name: Install Linux Dependencies + run: | + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt install gcc-12 g++-12 gfortran-12 + echo "CC=gcc-12" >> $GITHUB_ENV + echo "CXX=g++-12" >> $GITHUB_ENV + echo "FC=gfortran-12" >> $GITHUB_ENV + + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - name: Get Sources + uses: actions/checkout@v4.1.7 + + # + # CMAKE CONFIGURE + # + - name: CMake Configure + run: | + mkdir "${{ runner.workspace }}/build" + cd "${{ runner.workspace }}/build" + cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \ + -G Ninja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DBUILD_SHARED_LIBS=ON \ + -DHDF5_ENABLE_ALL_WARNINGS=ON \ + -DHDF5_ENABLE_PARALLEL:BOOL=OFF \ + -DHDF5_BUILD_CPP_LIB:BOOL=ON \ + -DHDF5_BUILD_FORTRAN=ON \ + -DHDF5_BUILD_JAVA=ON \ + -DHDF5_BUILD_DOC=OFF \ + -DLIBAEC_USE_LOCALCONTENT=OFF \ + -DZLIB_USE_LOCALCONTENT=OFF \ + -DHDF5_ENABLE_MIRROR_VFD:BOOL=ON \ + -DHDF5_ENABLE_DIRECT_VFD:BOOL=ON \ + -DHDF5_ENABLE_ROS3_VFD:BOOL=ON \ + -DH5_NO_DEPRECATED_SYMBOLS:BOOL=OFF \ + -DDEFAULT_API_VERSION:STRING=v18 \ + $GITHUB_WORKSPACE + shell: bash + + # + # BUILD + # + - name: CMake Build + run: cmake --build . --parallel 3 --config Debug + working-directory: ${{ runner.workspace }}/build + + # + # RUN TESTS - disable until some tests are fixed + # +# - name: CMake Run Tests +# run: ctest . --parallel 2 -C Debug -V # working-directory: ${{ runner.workspace }}/build build_v1_10: @@ -199,6 +259,126 @@ jobs: run: ctest . --parallel 2 -C Debug -V working-directory: ${{ runner.workspace }}/build + build_v1_14: + name: "gcc DBG v1.14 default API" + runs-on: ubuntu-latest + steps: + # SETUP + - name: Install Linux Dependencies + run: | + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt install gcc-12 g++-12 gfortran-12 + echo "CC=gcc-12" >> $GITHUB_ENV + echo "CXX=g++-12" >> $GITHUB_ENV + echo "FC=gfortran-12" >> $GITHUB_ENV + + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - name: Get Sources + uses: actions/checkout@v4.1.7 + + # + # CMAKE CONFIGURE + # + - name: CMake Configure + run: | + mkdir "${{ runner.workspace }}/build" + cd "${{ runner.workspace }}/build" + cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \ + -G Ninja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DBUILD_SHARED_LIBS=ON \ + -DHDF5_ENABLE_ALL_WARNINGS=ON \ + -DHDF5_ENABLE_PARALLEL:BOOL=OFF \ + -DHDF5_BUILD_CPP_LIB:BOOL=ON \ + -DHDF5_BUILD_FORTRAN=ON \ + -DHDF5_BUILD_JAVA=ON \ + -DHDF5_BUILD_DOC=OFF \ + -DLIBAEC_USE_LOCALCONTENT=OFF \ + -DZLIB_USE_LOCALCONTENT=OFF \ + -DHDF5_ENABLE_MIRROR_VFD:BOOL=ON \ + -DHDF5_ENABLE_DIRECT_VFD:BOOL=ON \ + -DHDF5_ENABLE_ROS3_VFD:BOOL=ON \ + -DH5_NO_DEPRECATED_SYMBOLS:BOOL=OFF \ + -DDEFAULT_API_VERSION:STRING=v114 \ + $GITHUB_WORKSPACE + shell: bash + + # + # BUILD + # + - name: CMake Build + run: cmake --build . --parallel 3 --config Debug + working-directory: ${{ runner.workspace }}/build + + # + # RUN TESTS + # + - name: CMake Run Tests + run: ctest . 
--parallel 2 -C Debug -V + working-directory: ${{ runner.workspace }}/build + + build_v1_16: + name: "gcc DBG v1.16 default API" + runs-on: ubuntu-latest + steps: + # SETUP + - name: Install Linux Dependencies + run: | + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt install gcc-12 g++-12 gfortran-12 + echo "CC=gcc-12" >> $GITHUB_ENV + echo "CXX=g++-12" >> $GITHUB_ENV + echo "FC=gfortran-12" >> $GITHUB_ENV + + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - name: Get Sources + uses: actions/checkout@v4.1.7 + + # + # CMAKE CONFIGURE + # + - name: CMake Configure + run: | + mkdir "${{ runner.workspace }}/build" + cd "${{ runner.workspace }}/build" + cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \ + -G Ninja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DBUILD_SHARED_LIBS=ON \ + -DHDF5_ENABLE_ALL_WARNINGS=ON \ + -DHDF5_ENABLE_PARALLEL:BOOL=OFF \ + -DHDF5_BUILD_CPP_LIB:BOOL=ON \ + -DHDF5_BUILD_FORTRAN=ON \ + -DHDF5_BUILD_JAVA=ON \ + -DHDF5_BUILD_DOC=OFF \ + -DLIBAEC_USE_LOCALCONTENT=OFF \ + -DZLIB_USE_LOCALCONTENT=OFF \ + -DHDF5_ENABLE_MIRROR_VFD:BOOL=ON \ + -DHDF5_ENABLE_DIRECT_VFD:BOOL=ON \ + -DHDF5_ENABLE_ROS3_VFD:BOOL=ON \ + -DH5_NO_DEPRECATED_SYMBOLS:BOOL=OFF \ + -DDEFAULT_API_VERSION:STRING=v116 \ + $GITHUB_WORKSPACE + shell: bash + + # + # BUILD + # + - name: CMake Build + run: cmake --build . --parallel 3 --config Debug + working-directory: ${{ runner.workspace }}/build + + # + # RUN TESTS + # + - name: CMake Run Tests + run: ctest . --parallel 2 -C Debug -V + working-directory: ${{ runner.workspace }}/build + build_zlibng: name: "gcc use zlib-ng filter" runs-on: ubuntu-latest diff --git a/.github/workflows/publish-release.yml b/.github/workflows/publish-release.yml index dd7f4bfbc86..19e01999487 100644 --- a/.github/workflows/publish-release.yml +++ b/.github/workflows/publish-release.yml @@ -58,3 +58,7 @@ jobs: run: | aws s3 sync ./${{ inputs.file_name }}.doxygen s3://${{ secrets.AWS_S3_BUCKET }}/${{ vars.TARGET_PATH }}/${{ inputs.target_dir }}/documentation/doxygen --delete + - name: Sync userguide to latest S3 bucket + run: | + aws s3 sync ./${{ inputs.file_name }}.doxygen s3://${{ secrets.AWS_S3_BUCKET }}/documentation/hdf5/latest --delete + diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index d59f8c9bc6b..d3f9fa0f986 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -67,6 +67,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 + uses: github/codeql-action/upload-sarif@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 with: sarif_file: results.sarif diff --git a/.github/workflows/tarball.yml b/.github/workflows/tarball.yml index a77c0418d45..483547f8af1 100644 --- a/.github/workflows/tarball.yml +++ b/.github/workflows/tarball.yml @@ -4,6 +4,11 @@ name: hdf5 dev tarball on: workflow_call: inputs: + use_ignore: + description: 'Ignore has_changes check' + type: string + required: false + default: check use_tag: description: 'Release version tag' type: string @@ -43,6 +48,8 @@ jobs: branch_ref: ${{ steps.get-branch-name.outputs.BRANCH_REF }} branch_sha: ${{ steps.get-branch-sha.outputs.BRANCH_SHA }} steps: + - uses: actions/checkout@v4 + - name: Get branch name id: get-branch-name env: @@ -67,7 +74,7 @@ jobs: with: seconds: 86400 # One day in seconds branch: '${{ steps.get-branch-name.outputs.branch_ref }}' - if: ${{ inputs.use_environ == 'snapshots' }} + if: ${{ (inputs.use_environ == 'snapshots' && inputs.use_ignore == 'check') }} - run: echo "You have ${{ steps.check-new-commits.outputs.new-commits-number }} new commit(s) in ${{ steps.get-branch-name.outputs.BRANCH_REF }} ✅!" if: ${{ steps.check-new-commits.outputs.has-new-commits == 'true' }} @@ -78,7 +85,7 @@ jobs: name: Create a source tarball runs-on: ubuntu-latest needs: check_commits - if: ${{ ((inputs.use_environ == 'snapshots') && (needs.check_commits.outputs.has_changes == 'true')) || (inputs.use_environ == 'release') }} + if: ${{ ((inputs.use_environ == 'snapshots') && ((needs.check_commits.outputs.has_changes == 'true') || (inputs.use_ignore == 'ignore'))) || (inputs.use_environ == 'release') }} outputs: file_base: ${{ steps.set-file-base.outputs.FILE_BASE }} source_base: ${{ steps.version.outputs.SOURCE_TAG }} diff --git a/CMakeLists.txt b/CMakeLists.txt index 5382a8ff8b7..2dc3af4a8ef 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -159,6 +159,7 @@ set (HDF5_LIB_BASE "hdf5") set (HDF5_LIB_CORE "") set (HDF5_TEST_LIB_CORE "_test") +set (HDF5_TEST_PAR_LIB_CORE "_testpar") set (HDF5_CPP_LIB_CORE "_cpp") set (HDF5_HL_LIB_CORE "_hl") set (HDF5_HL_CPP_LIB_CORE "_hl_cpp") @@ -174,6 +175,7 @@ set (HDF5_JAVA_JNI_LIB_CORE "_java") set (HDF5_LIB_CORENAME "${HDF5_LIB_BASE}") set (HDF5_TEST_LIB_CORENAME "${HDF5_LIB_BASE}${HDF5_TEST_LIB_CORE}") +set (HDF5_TEST_PAR_LIB_CORENAME "${HDF5_LIB_BASE}${HDF5_TEST_PAR_LIB_CORE}") set (HDF5_CPP_LIB_CORENAME "${HDF5_LIB_BASE}${HDF5_CPP_LIB_CORE}") set (HDF5_HL_LIB_CORENAME "${HDF5_LIB_BASE}${HDF5_HL_LIB_CORE}") set (HDF5_HL_CPP_LIB_CORENAME "${HDF5_LIB_BASE}${HDF5_HL_CPP_LIB_CORE}") @@ -194,6 +196,7 @@ set (HDF5_JAVA_TEST_LIB_CORENAME "jartest5") #----------------------------------------------------------------------------- set (HDF5_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_LIB_BASE}${HDF5_LIB_INFIX}${HDF5_LIB_CORE}${HDF5_EXTERNAL_LIB_SUFFIX}") set (HDF5_TEST_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_LIB_BASE}${HDF5_LIB_INFIX}${HDF5_TEST_LIB_CORE}${HDF5_EXTERNAL_LIB_SUFFIX}") +set (HDF5_TEST_PAR_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_LIB_BASE}${HDF5_LIB_INFIX}${HDF5_TEST_PAR_LIB_CORE}${HDF5_EXTERNAL_LIB_SUFFIX}") set (HDF5_CPP_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_LIB_BASE}${HDF5_LIB_INFIX}${HDF5_CPP_LIB_CORE}${HDF5_EXTERNAL_LIB_SUFFIX}") set (HDF5_HL_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_LIB_BASE}${HDF5_LIB_INFIX}${HDF5_HL_LIB_CORE}${HDF5_EXTERNAL_LIB_SUFFIX}") set 
(HDF5_HL_CPP_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_LIB_BASE}${HDF5_LIB_INFIX}${HDF5_HL_CPP_LIB_CORE}${HDF5_EXTERNAL_LIB_SUFFIX}") @@ -216,6 +219,7 @@ set (HDF5_JAVA_TEST_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_JAVA_TEST_LIB #----------------------------------------------------------------------------- set (HDF5_LIB_TARGET "${HDF5_LIB_CORENAME}-static") set (HDF5_TEST_LIB_TARGET "${HDF5_TEST_LIB_CORENAME}-static") +set (HDF5_TEST_PAR_LIB_TARGET "${HDF5_TEST_PAR_LIB_CORENAME}-static") set (HDF5_CPP_LIB_TARGET "${HDF5_CPP_LIB_CORENAME}-static") set (HDF5_HL_LIB_TARGET "${HDF5_HL_LIB_CORENAME}-static") set (HDF5_HL_CPP_LIB_TARGET "${HDF5_HL_CPP_LIB_CORENAME}-static") @@ -232,6 +236,7 @@ set (HDF5_JAVA_HDF5_LIB_TARGET "${HDF5_JAVA_HDF5_LIB_CORENAME}") set (HDF5_JAVA_TEST_LIB_TARGET "${HDF5_JAVA_TEST_LIB_CORENAME}") set (HDF5_LIBSH_TARGET "${HDF5_LIB_CORENAME}-shared") set (HDF5_TEST_LIBSH_TARGET "${HDF5_TEST_LIB_CORENAME}-shared") +set (HDF5_TEST_PAR_LIBSH_TARGET "${HDF5_TEST_PAR_LIB_CORENAME}-shared") set (HDF5_CPP_LIBSH_TARGET "${HDF5_CPP_LIB_CORENAME}-shared") set (HDF5_HL_LIBSH_TARGET "${HDF5_HL_LIB_CORENAME}-shared") set (HDF5_HL_CPP_LIBSH_TARGET "${HDF5_HL_CPP_LIB_CORENAME}-shared") @@ -851,9 +856,8 @@ if (HDF5_ENABLE_SUBFILING_VFD) endif() -#option (DEFAULT_API_VERSION "Enable v1.16 API (v16, v18, v110, v112, v114, v116)" "v116") -set (DEFAULT_API_VERSION "v116" CACHE STRING "Enable v1.16 API (v16, v18, v110, v112, v114, v116)") -set_property (CACHE DEFAULT_API_VERSION PROPERTY STRINGS v16 v18 v110 v112 v114 v116) +set (DEFAULT_API_VERSION "v118" CACHE STRING "Enable v1.18 API (v16, v18, v110, v112, v114, v116, v118)") +set_property (CACHE DEFAULT_API_VERSION PROPERTY STRINGS v16 v18 v110 v112 v114 v116 v118) #----------------------------------------------------------------------------- # Option to use 1.6.x API #----------------------------------------------------------------------------- @@ -897,14 +901,22 @@ endif () #----------------------------------------------------------------------------- # Option to use 1.16.x API #----------------------------------------------------------------------------- -if (NOT DEFAULT_API_VERSION) - set (DEFAULT_API_VERSION "v116") -endif () set (H5_USE_116_API_DEFAULT 0) if (DEFAULT_API_VERSION MATCHES "v116") set (H5_USE_116_API_DEFAULT 1) endif () +#----------------------------------------------------------------------------- +# Option to use 1.18.x API +#----------------------------------------------------------------------------- +if (NOT DEFAULT_API_VERSION) + set (DEFAULT_API_VERSION "v118") +endif () +set (H5_USE_118_API_DEFAULT 0) +if (DEFAULT_API_VERSION MATCHES "v118") + set (H5_USE_118_API_DEFAULT 1) +endif () + +#----------------------------------------------------------------------------- # Include user macros #----------------------------------------------------------------------------- diff --git a/HDF5Examples/C/H5T/h5ex_t_objref.c b/HDF5Examples/C/H5T/h5ex_t_objref.c index e6c2de95d66..db20bc0eb11 100644 --- a/HDF5Examples/C/H5T/h5ex_t_objref.c +++ b/HDF5Examples/C/H5T/h5ex_t_objref.c @@ -124,7 +124,7 @@ main(void) #if H5_VERSION_GE(1, 12, 0) && !defined(H5_USE_110_API) && !defined(H5_USE_18_API) && !defined(H5_USE_16_API) rdata = (H5R_ref_t *)malloc(dims[0] * sizeof(H5R_ref_t)); #else - rdata = (hobj_ref_t *)malloc(dims[0] * sizeof(hobj_ref_t)); + rdata = (hobj_ref_t *)malloc(dims[0] * sizeof(hobj_ref_t)); #endif /* * Read the data.
diff --git a/HDF5Examples/C/H5T/h5ex_t_objrefatt.c b/HDF5Examples/C/H5T/h5ex_t_objrefatt.c index 562364a203e..9489bcb2f11 100644 --- a/HDF5Examples/C/H5T/h5ex_t_objrefatt.c +++ b/HDF5Examples/C/H5T/h5ex_t_objrefatt.c @@ -136,7 +136,7 @@ main(void) #if H5_VERSION_GE(1, 12, 0) && !defined(H5_USE_110_API) && !defined(H5_USE_18_API) && !defined(H5_USE_16_API) rdata = (H5R_ref_t *)malloc(dims[0] * sizeof(H5R_ref_t)); #else - rdata = (hobj_ref_t *)malloc(dims[0] * sizeof(hobj_ref_t)); + rdata = (hobj_ref_t *)malloc(dims[0] * sizeof(hobj_ref_t)); #endif /* * Read the data. diff --git a/HDF5Examples/C/H5T/h5ex_t_regref.c b/HDF5Examples/C/H5T/h5ex_t_regref.c index e6d4cef2c32..6766198cee6 100644 --- a/HDF5Examples/C/H5T/h5ex_t_regref.c +++ b/HDF5Examples/C/H5T/h5ex_t_regref.c @@ -154,7 +154,7 @@ main(void) #if H5_VERSION_GE(1, 12, 0) && !defined(H5_USE_110_API) && !defined(H5_USE_18_API) && !defined(H5_USE_16_API) rdata = (H5R_ref_t *)malloc(dims[0] * sizeof(H5R_ref_t)); #else - rdata = (hdset_reg_ref_t *)malloc(dims[0] * sizeof(hdset_reg_ref_t)); + rdata = (hdset_reg_ref_t *)malloc(dims[0] * sizeof(hdset_reg_ref_t)); #endif status = H5Sclose(space); diff --git a/HDF5Examples/C/H5T/h5ex_t_regrefatt.c b/HDF5Examples/C/H5T/h5ex_t_regrefatt.c index bb31b707c51..dab4e28cbaf 100644 --- a/HDF5Examples/C/H5T/h5ex_t_regrefatt.c +++ b/HDF5Examples/C/H5T/h5ex_t_regrefatt.c @@ -169,7 +169,7 @@ main(void) #if H5_VERSION_GE(1, 12, 0) && !defined(H5_USE_110_API) && !defined(H5_USE_18_API) && !defined(H5_USE_16_API) rdata = (H5R_ref_t *)malloc(dims[0] * sizeof(H5R_ref_t)); #else - rdata = (hdset_reg_ref_t *)malloc(dims[0] * sizeof(hdset_reg_ref_t)); + rdata = (hdset_reg_ref_t *)malloc(dims[0] * sizeof(hdset_reg_ref_t)); #endif status = H5Sclose(space); diff --git a/HDF5Examples/C/TUTR/h5_extend.c b/HDF5Examples/C/TUTR/h5_extend.c index 91d7db70fc9..0dde217db30 100644 --- a/HDF5Examples/C/TUTR/h5_extend.c +++ b/HDF5Examples/C/TUTR/h5_extend.c @@ -36,8 +36,8 @@ main(void) herr_t status; hsize_t chunk_dims[2] = {2, 5}; int data[3][3] = {{1, 1, 1}, /* data to write */ - {1, 1, 1}, - {1, 1, 1}}; + {1, 1, 1}, + {1, 1, 1}}; /* Variables used in extending and writing to the extended portion of dataset */ hsize_t size[2]; diff --git a/HDF5Examples/C/TUTR/testh5cc.sh.in b/HDF5Examples/C/TUTR/testh5cc.sh.in index f95108ddb99..4b888c1d2a0 100644 --- a/HDF5Examples/C/TUTR/testh5cc.sh.in +++ b/HDF5Examples/C/TUTR/testh5cc.sh.in @@ -44,6 +44,7 @@ H5_USE_110_API_DEFAULT=`grep '#define H5_USE_110_API_DEFAULT ' ../src/H5pubconf. H5_USE_112_API_DEFAULT=`grep '#define H5_USE_112_API_DEFAULT ' ../src/H5pubconf.h` H5_USE_114_API_DEFAULT=`grep '#define H5_USE_114_API_DEFAULT ' ../src/H5pubconf.h` H5_USE_116_API_DEFAULT=`grep '#define H5_USE_116_API_DEFAULT ' ../src/H5pubconf.h` +H5_USE_118_API_DEFAULT=`grep '#define H5_USE_118_API_DEFAULT ' ../src/H5pubconf.h` # setup my machine information. myos=`uname -s` @@ -483,6 +484,8 @@ elif [ -n "$H5_USE_114_API_DEFAULT" ]; then echo "H5_USE_114_API_DEFAULT is defined." elif [ -n "$H5_USE_116_API_DEFAULT" ]; then echo "H5_USE_116_API_DEFAULT is defined." +elif [ -n "$H5_USE_118_API_DEFAULT" ]; then + echo "H5_USE_118_API_DEFAULT is defined." else echo "No H5 API_DEFAULT is defined." 
fi @@ -523,17 +526,27 @@ elif [ -n "$H5_USE_114_API_DEFAULT" ]; then TOOLTEST -DH5_USE_110_API_DEFAULT $v110main TOOLTEST -DH5_USE_112_API_DEFAULT $v112main TOOLTEST $v114main -else +elif [ -n "$H5_USE_116_API_DEFAULT" ]; then echo "Testing HDF5 with 116_API_DEFAULT" TOOLTEST -DH5_USE_16_API_DEFAULT $v16main TOOLTEST -DH5_USE_18_API_DEFAULT $v18main TOOLTEST -DH5_USE_110_API_DEFAULT $v110main TOOLTEST -DH5_USE_112_API_DEFAULT $v112main TOOLTEST -DH5_USE_114_API_DEFAULT $v114main + TOOLTEST $v116main +else + echo "Testing HDF5 with 118_API_DEFAULT" + TOOLTEST -DH5_USE_16_API_DEFAULT $v16main + TOOLTEST -DH5_USE_18_API_DEFAULT $v18main + TOOLTEST -DH5_USE_110_API_DEFAULT $v110main + TOOLTEST -DH5_USE_112_API_DEFAULT $v112main + TOOLTEST -DH5_USE_114_API_DEFAULT $v114main + TOOLTEST -DH5_USE_116_API_DEFAULT $v116main TOOLTEST $v18main TOOLTEST $v110main TOOLTEST $v112main TOOLTEST $v114main + TOOLTEST $v116main fi # Group 6: # HDF5 program that depends on input args. diff --git a/HDF5Examples/CMakePresets.json b/HDF5Examples/CMakePresets.json index 0f2c1b5e013..1f722cc4e20 100644 --- a/HDF5Examples/CMakePresets.json +++ b/HDF5Examples/CMakePresets.json @@ -74,6 +74,24 @@ "ci-StdShar" ] }, + { + "name": "ci-StdShar-macos-Clang", + "description": "Clang Standard Config for macos (Release)", + "inherits": [ + "ci-macos-arm64-Release-Clang", + "ci-StdJava", + "ci-StdShar" + ] + }, + { + "name": "ci-StdShar-macos-GNUC", + "description": "GNUC Standard Config for macos (Release)", + "inherits": [ + "ci-macos-arm64-Release-GNUC", + "ci-StdJava", + "ci-StdShar" + ] + }, { "name": "ci-StdShar-Intel", "description": "Intel Standard Config for x64 (Release)", @@ -111,6 +129,23 @@ "ci-x64-Release-GNUC" ] }, + { + "name": "ci-StdShar-macos-Clang", + "description": "Clang Standard Build for macos-arm64 (Release)", + "configurePreset": "ci-StdShar-macos-Clang", + "inherits": [ + "ci-macos-arm64-Release-Clang" + ] + }, + { + "name": "ci-StdShar-macos-GNUC", + "description": "GNUC Standard Build for macos-arm64 (Release)", + "configurePreset": "ci-StdShar-macos-GNUC", + "verbose": true, + "inherits": [ + "ci-macos-arm64-Release-GNUC" + ] + }, { "name": "ci-StdShar-Intel", "description": "Intel Standard Build for x64 (Release)", @@ -137,22 +172,24 @@ ] }, { - "name": "ci-StdShar-MACOS-Clang", - "configurePreset": "ci-StdShar-Clang", + "name": "ci-StdShar-macos-Clang", + "configurePreset": "ci-StdShar-macos-Clang", "inherits": [ - "ci-x64-Release-Clang" + "ci-macos-arm64-Release-Clang" ], "execution": { "noTestsAction": "error", "timeout": 180, "jobs": 2 - }, - "condition": { - "type": "equals", - "lhs": "${hostSystemName}", - "rhs": "Darwin" } }, + { + "name": "ci-StdShar-macos-GNUC", + "configurePreset": "ci-StdShar-macos-GNUC", + "inherits": [ + "ci-macos-arm64-Release-GNUC" + ] + }, { "name": "ci-StdShar-GNUC", "configurePreset": "ci-StdShar-GNUC", @@ -203,11 +240,11 @@ ] }, { - "name": "ci-StdShar-MACOS-Clang", + "name": "ci-StdShar-macos-Clang", "steps": [ - {"type": "configure", "name": "ci-StdShar-Clang"}, - {"type": "build", "name": "ci-StdShar-Clang"}, - {"type": "test", "name": "ci-StdShar-MACOS-Clang"} + {"type": "configure", "name": "ci-StdShar-macos-Clang"}, + {"type": "build", "name": "ci-StdShar-macos-Clang"}, + {"type": "test", "name": "ci-StdShar-macos-Clang"} ] }, { @@ -218,6 +255,14 @@ {"type": "test", "name": "ci-StdShar-GNUC"} ] }, + { + "name": "ci-StdShar-macos-GNUC", + "steps": [ + {"type": "configure", "name": "ci-StdShar-macos-GNUC"}, + {"type": "build", "name": 
"ci-StdShar-macos-GNUC"}, + {"type": "test", "name": "ci-StdShar-macos-GNUC"} + ] + }, { "name": "ci-StdShar-Intel", "steps": [ diff --git a/HDF5Examples/CXX/TUTR/h5tutr_extend.cpp b/HDF5Examples/CXX/TUTR/h5tutr_extend.cpp index 3916f9ab06c..53588abc72a 100644 --- a/HDF5Examples/CXX/TUTR/h5tutr_extend.cpp +++ b/HDF5Examples/CXX/TUTR/h5tutr_extend.cpp @@ -33,8 +33,8 @@ main(void) hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; hsize_t chunk_dims[2] = {2, 5}; int data[3][3] = {{1, 1, 1}, // data to write - {1, 1, 1}, - {1, 1, 1}}; + {1, 1, 1}, + {1, 1, 1}}; // Variables used in extending and writing to the extended portion of dataset diff --git a/HDF5Examples/JAVA/H5T/H5Ex_T_Commit.java b/HDF5Examples/JAVA/H5T/H5Ex_T_Commit.java index cd26a96e25b..379522cd5d2 100644 --- a/HDF5Examples/JAVA/H5T/H5Ex_T_Commit.java +++ b/HDF5Examples/JAVA/H5T/H5Ex_T_Commit.java @@ -73,7 +73,7 @@ private static class Sensor_Datatype { String[] memberNames = {"Serial number", "Location", "Temperature (F)", "Pressure (inHg)"}; long[] memberFileTypes = {HDF5Constants.H5T_STD_I32BE, HDF5Constants.H5T_C_S1, - HDF5Constants.H5T_IEEE_F64BE, HDF5Constants.H5T_IEEE_F64BE}; + HDF5Constants.H5T_IEEE_F64BE, HDF5Constants.H5T_IEEE_F64BE}; static int[] memberStorage = {INTEGERSIZE, MAXSTRINGSIZE, DOUBLESIZE, DOUBLESIZE}; // Data size is the storage size for the members not the object. diff --git a/HDF5Examples/JAVA/H5T/H5Ex_T_Compound.java b/HDF5Examples/JAVA/H5T/H5Ex_T_Compound.java index 21aeabc29f8..5bccd641127 100644 --- a/HDF5Examples/JAVA/H5T/H5Ex_T_Compound.java +++ b/HDF5Examples/JAVA/H5T/H5Ex_T_Compound.java @@ -43,7 +43,7 @@ static class Sensor_Datatype { static String[] memberNames = {"Serial number", "Location", "Temperature (F)", "Pressure (inHg)"}; static long[] memberMemTypes = {HDF5Constants.H5T_NATIVE_INT, HDF5Constants.H5T_C_S1, - HDF5Constants.H5T_NATIVE_DOUBLE, HDF5Constants.H5T_NATIVE_DOUBLE}; + HDF5Constants.H5T_NATIVE_DOUBLE, HDF5Constants.H5T_NATIVE_DOUBLE}; static long[] memberFileTypes = {HDF5Constants.H5T_STD_I32BE, HDF5Constants.H5T_C_S1, HDF5Constants.H5T_IEEE_F64BE, HDF5Constants.H5T_IEEE_F64BE}; static int[] memberStorage = {INTEGERSIZE, MAXSTRINGSIZE, DOUBLESIZE, DOUBLESIZE}; diff --git a/HDF5Examples/JAVA/H5T/H5Ex_T_CompoundAttribute.java b/HDF5Examples/JAVA/H5T/H5Ex_T_CompoundAttribute.java index a33faee9e59..86933ae6684 100644 --- a/HDF5Examples/JAVA/H5T/H5Ex_T_CompoundAttribute.java +++ b/HDF5Examples/JAVA/H5T/H5Ex_T_CompoundAttribute.java @@ -44,7 +44,7 @@ static class Sensor_Datatype { static String[] memberNames = {"Serial number", "Location", "Temperature (F)", "Pressure (inHg)"}; static long[] memberMemTypes = {HDF5Constants.H5T_NATIVE_INT, HDF5Constants.H5T_C_S1, - HDF5Constants.H5T_NATIVE_DOUBLE, HDF5Constants.H5T_NATIVE_DOUBLE}; + HDF5Constants.H5T_NATIVE_DOUBLE, HDF5Constants.H5T_NATIVE_DOUBLE}; static long[] memberFileTypes = {HDF5Constants.H5T_STD_I32BE, HDF5Constants.H5T_C_S1, HDF5Constants.H5T_IEEE_F64BE, HDF5Constants.H5T_IEEE_F64BE}; static int[] memberStorage = {INTEGERSIZE, MAXSTRINGSIZE, DOUBLESIZE, DOUBLESIZE}; diff --git a/HDF5Examples/Using_CMake.txt b/HDF5Examples/Using_CMake.txt index baef3565194..c543300092b 100644 --- a/HDF5Examples/Using_CMake.txt +++ b/HDF5Examples/Using_CMake.txt @@ -90,11 +90,11 @@ These steps are described in more detail below. 
* MinGW Makefiles * NMake Makefiles * Unix Makefiles - * Visual Studio 15 2017 - * Visual Studio 15 2017 Win64 - * Visual Studio 16 2019 - * Visual Studio 17 2022 - + * Visual Studio 15 + * Visual Studio 15 Win64 + * Visual Studio 17 + * Visual Studio 17 Win64 + * Visual Studio 19 is: * H5EX_BUILD_TESTING:BOOL=ON diff --git a/HDF5Examples/config/cmake-presets/hidden-presets.json b/HDF5Examples/config/cmake-presets/hidden-presets.json index 590e7ec7e1f..d38e802f1f6 100644 --- a/HDF5Examples/config/cmake-presets/hidden-presets.json +++ b/HDF5Examples/config/cmake-presets/hidden-presets.json @@ -10,7 +10,7 @@ "binaryDir": "${sourceParentDir}/build/${presetName}", "installDir": "${sourceParentDir}/install/${presetName}" }, - { + { "name": "ci-x64", "architecture": { "value": "x64", @@ -21,7 +21,15 @@ { "name": "ci-x86", "architecture": { - "value": "x86", + "value": "Win32", + "strategy": "external" + }, + "hidden": true + }, + { + "name": "ci-arm64", + "architecture": { + "value": "ARM64", "strategy": "external" }, "hidden": true @@ -47,25 +55,41 @@ "CMAKE_C_COMPILER": "cl", "CMAKE_CXX_COMPILER": "cl" }, - "toolset": { - "value": "host=x64", - "strategy": "external" - }, "condition": { "type": "equals", "lhs": "${hostSystemName}", "rhs": "Windows" } }, + { + "name": "ci-macos-arm64", + "hidden": true, + "cacheVariables": { + "CMAKE_OSX_ARCHITECTURES": "arm64" + }, + "condition": { + "type": "equals", + "lhs": "${hostSystemName}", + "rhs": "Darwin" + } + }, + { + "name": "ci-macos-x86_64", + "hidden": true, + "cacheVariables": { + "CMAKE_OSX_ARCHITECTURES": "x86_64" + }, + "condition": { + "type": "equals", + "lhs": "${hostSystemName}", + "rhs": "Darwin" + } + }, { "name": "ci-Clang", "hidden": true, "cacheVariables": { "CMAKE_TOOLCHAIN_FILE": "config/toolchain/clang.cmake" - }, - "toolset": { - "value": "host=x64", - "strategy": "external" } }, { @@ -78,21 +102,44 @@ "type": "equals", "lhs": "${hostSystemName}", "rhs": "Linux" - }, - "toolset": { - "value": "host=x64", - "strategy": "external" } }, { "name": "ci-Intel", + "hidden": true + }, + { + "name": "ci-Fortran", + "hidden": true, + "cacheVariables": { + "H5EX_BUILD_FORTRAN": "ON" + } + }, + { + "name": "ci-Fortran-Clang", "hidden": true, + "inherits": "ci-Fortran", "cacheVariables": { - "CMAKE_TOOLCHAIN_FILE": "config/toolchain/intel.cmake" + "CMAKE_Fortran_COMPILER": {"type": "FILEPATH", "value": "gfortran"} }, - "toolset": { - "value": "host=x64", - "strategy": "external" + "condition": { + "type": "matches", + "string": "${presetName}", + "regex": ".*-Clang" + } + }, + { + "name": "ci-CPP", + "hidden": true, + "cacheVariables": { + "H5EX_BUILD_CPP_LIB": "ON" + } + }, + { + "name": "ci-Java", + "hidden": true, + "cacheVariables": { + "H5EX_BUILD_JAVA": "ON" } }, { @@ -161,6 +208,50 @@ "ci-GNUC" ] }, + { + "name": "ci-macos-arm64-Debug-Clang", + "description": "Clang/LLVM for x64 (Debug)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-macos-arm64", + "ci-Debug", + "ci-Clang" + ] + }, + { + "name": "ci-macos-arm64-Release-Clang", + "description": "Clang/LLVM for x64 (Release)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-macos-arm64", + "ci-Release", + "ci-Clang" + ] + }, + { + "name": "ci-macos-arm64-Debug-GNUC", + "description": "GNUC for x64 (Debug)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-macos-arm64", + "ci-Debug", + "ci-GNUC" + ] + }, + { + "name": "ci-macos-arm64-Release-GNUC", + "description": "GNUC for x64 (Release)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-macos-arm64", + 
"ci-Release", + "ci-GNUC" + ] + }, { "name": "ci-x64-Debug-Intel", "description": "Intel for x64 (Debug)", @@ -242,6 +333,38 @@ "ci-base" ] }, + { + "name": "ci-macos-arm64-Debug-Clang", + "configurePreset": "ci-macos-arm64-Debug-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-arm64-Release-Clang", + "configurePreset": "ci-macos-arm64-Release-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-arm64-Debug-GNUC", + "configurePreset": "ci-macos-arm64-Debug-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-arm64-Release-GNUC", + "configurePreset": "ci-macos-arm64-Release-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, { "name": "ci-x64-Debug-Intel", "configurePreset": "ci-x64-Debug-Intel", @@ -327,6 +450,38 @@ "ci-base" ] }, + { + "name": "ci-macos-Debug-Clang", + "configurePreset": "ci-macos-Debug-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-arm64-Release-Clang", + "configurePreset": "ci-macos-arm64-Release-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-arm64-Debug-GNUC", + "configurePreset": "ci-macos-arm64-Debug-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-arm64-Release-GNUC", + "configurePreset": "ci-macos-arm64-Release-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, { "name": "ci-x64-Debug-Intel", "configurePreset": "ci-x64-Debug-Intel", @@ -371,6 +526,18 @@ "hidden": true, "inherits": "ci-base" }, + { + "name": "ci-macos-arm64-Release-Clang", + "configurePreset": "ci-macos-arm64-Release-Clang", + "hidden": true, + "inherits": "ci-base" + }, + { + "name": "ci-macos-arm64-Release-GNUC", + "configurePreset": "ci-macos-arm64-Release-GNUC", + "hidden": true, + "inherits": "ci-base" + }, { "name": "ci-x64-Release-Intel", "configurePreset": "ci-x64-Release-Intel", diff --git a/HDF5Examples/config/cmake/HDFExampleMacros.cmake b/HDF5Examples/config/cmake/HDFExampleMacros.cmake index bbb042177a3..dd2c46fba67 100644 --- a/HDF5Examples/config/cmake/HDFExampleMacros.cmake +++ b/HDF5Examples/config/cmake/HDFExampleMacros.cmake @@ -137,7 +137,7 @@ macro (HDF5_SUPPORT) set (FIND_HDF_COMPONENTS C shared) else () set (FIND_HDF_COMPONENTS C static) - set (HDEXF_BUILD_JAVA OFF CACHE BOOL "Build Java support" FORCE) + set (H5EX_BUILD_JAVA OFF CACHE BOOL "Build Java support" FORCE) message (STATUS "Using static HDF5 - disable build of Java examples") endif () if (H5EX_BUILD_FORTRAN) diff --git a/README.md b/README.md index 7c1173f6868..37c45ad022d 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,10 @@ -HDF5 version 1.15.0 currently under development +HDF5 version 1.17.0 currently under development ![HDF5 Logo](doxygen/img/HDF5.png) [![develop cmake build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/cmake.yml?branch=develop&label=HDF5%20develop%20CMake%20CI)](https://github.com/HDFGroup/hdf5/actions/workflows/cmake.yml?query=branch%3Adevelop) [![develop autotools build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/autotools.yml?branch=develop&label=HDF5%20develop%20Autotools%20CI)](https://github.com/HDFGroup/hdf5/actions/workflows/autotools.yml?query=branch%3Adevelop) +[![HDF-EOS5 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/hdfeos5.yml?branch=develop&label=HDF-EOS5)](https://github.com/HDFGroup/hdf5/actions/workflows/hdfeos5.yml?query=branch%3Adevelop) 
[![netCDF build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/netcdf.yml?branch=develop&label=netCDF)](https://github.com/HDFGroup/hdf5/actions/workflows/netcdf.yml?query=branch%3Adevelop) [![h5py build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/h5py.yml?branch=develop&label=h5py)](https://github.com/HDFGroup/hdf5/actions/workflows/h5py.yml?query=branch%3Adevelop) [![CVE regression](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/cve.yml?branch=develop&label=CVE)](https://github.com/HDFGroup/hdf5/actions/workflows/cve.yml?query=branch%3Adevelop) @@ -28,17 +29,17 @@ tools, and services at [The HDF Group's website](https://www.hdfgroup.org/). DOCUMENTATION ------------- -This release is fully functional for the API described in the documentation. +Documentation for all HDF software is available at: - https://hdfgroup.github.io/hdf5/develop/_l_b_a_p_i.html + https://support.hdfgroup.org/documentation/index.html -Full Documentation and Programming Resources for this release can be found at +Documentation for the current HDF5 library release is available at: - https://hdfgroup.github.io/hdf5/develop/index.html + https://support.hdfgroup.org/releases/hdf5/latest-docs.html -The latest doxygen documentation generated on changes to develop is available at: +The latest Doxygen documentation generated on changes to `develop`, which does **not** correspond to any particular library release, is available at: - https://hdfgroup.github.io/hdf5/develop + https://hdfgroup.github.io/hdf5/develop See the [RELEASE.txt](release_docs/RELEASE.txt) file in the [release_docs/](release_docs/) directory for information specific to the features and updates included in this release of the library. diff --git a/bin/genparser b/bin/genparser index 619dbfaa3ed..3e113739447 100755 --- a/bin/genparser +++ b/bin/genparser @@ -200,14 +200,14 @@ if [ "$verbose" = true ] ; then fi ${HDF5_FLEX} --nounistd -PH5LTyy -o ${path_to_hl_src}/H5LTanalyze.c ${path_to_hl_src}/H5LTanalyze.l -# fix H5LTparse.c and H5LTlparse.h to declare H5LTyyparse return type as an -# hid_t instead of int. Currently the generated function H5LTyyparse is -# generated with a return value of type int, which is a mapping to the +# Fix H5LTparse.c and H5LTparse.h to declare H5LTyyparse return type as an +# hid_t instead of int. Currently, the H5LTyyparse function is generated +# with a return value of type int, which is a mapping to the # flex yyparse function. The return value in the HL library should be # an hid_t. -# I propose to not use flex to generate this function, but for now I am -# adding a perl command to find and replace this function declaration in -# H5LTparse.c. +# Use Perl command to find and replace this function declaration +# in H5LTparse.c. This is a temporary solution until a method that does not +# use flex is implemented. 
perl -0777 -pi -e 's/int yyparse/hid_t yyparse/igs' ${path_to_hl_src}/H5LTparse.c perl -0777 -pi -e 's/int\nyyparse/hid_t\nyyparse/igs' ${path_to_hl_src}/H5LTparse.c perl -0777 -pi -e 's/int H5LTyyparse/hid_t H5LTyyparse/igs' ${path_to_hl_src}/H5LTparse.c diff --git a/c++/src/H5DataType.cpp b/c++/src/H5DataType.cpp index 13b0decc022..98b7920b0a0 100644 --- a/c++/src/H5DataType.cpp +++ b/c++/src/H5DataType.cpp @@ -91,8 +91,8 @@ DataType::DataType(const H5T_class_t type_class, size_t size) ///\exception H5::ReferenceException //-------------------------------------------------------------------------- DataType::DataType(const H5Location &loc, const void *ref, H5R_type_t ref_type, const PropList &plist) - : H5Object(), id{H5Location::p_dereference(loc.getId(), ref, ref_type, plist, - "constructor - by dereference")}, + : H5Object(), + id{H5Location::p_dereference(loc.getId(), ref, ref_type, plist, "constructor - by dereference")}, encoded_buf(NULL), buf_size(0) { } diff --git a/c++/src/H5FaccProp.cpp b/c++/src/H5FaccProp.cpp index dc4b949b5b3..12af96db666 100644 --- a/c++/src/H5FaccProp.cpp +++ b/c++/src/H5FaccProp.cpp @@ -713,6 +713,8 @@ FileAccPropList::getFileLocking(hbool_t &use_file_locking, hbool_t &ignore_when_ /// \li \c H5F_LIBVER_110 /// \li \c H5F_LIBVER_112 /// \li \c H5F_LIBVER_114 +/// \li \c H5F_LIBVER_116 +/// \li \c H5F_LIBVER_118 /// \li \c H5F_LIBVER_LATEST /// /// Valid values of \a libver_high are as follows: @@ -720,6 +722,8 @@ FileAccPropList::getFileLocking(hbool_t &use_file_locking, hbool_t &ignore_when_ /// \li \c H5F_LIBVER_110 /// \li \c H5F_LIBVER_112 /// \li \c H5F_LIBVER_114 +/// \li \c H5F_LIBVER_116 +/// \li \c H5F_LIBVER_118 /// \li \c H5F_LIBVER_LATEST (Default) /// /// For more detail, please refer to the H5Pset_libver_bounds API in @@ -751,6 +755,8 @@ FileAccPropList::setLibverBounds(H5F_libver_t libver_low, H5F_libver_t libver_hi /// \li \c H5F_LIBVER_110 /// \li \c H5F_LIBVER_112 /// \li \c H5F_LIBVER_114 +/// \li \c H5F_LIBVER_116 +/// \li \c H5F_LIBVER_118 /// \li \c H5F_LIBVER_LATEST /// /// and \a libver_high: @@ -758,6 +764,8 @@ FileAccPropList::setLibverBounds(H5F_libver_t libver_low, H5F_libver_t libver_hi /// \li \c H5F_LIBVER_110 /// \li \c H5F_LIBVER_112 /// \li \c H5F_LIBVER_114 +/// \li \c H5F_LIBVER_116 +/// \li \c H5F_LIBVER_118 /// \li \c H5F_LIBVER_LATEST //-------------------------------------------------------------------------- void diff --git a/c++/test/dsets.cpp b/c++/test/dsets.cpp index 65dabbe11c2..625e97f0d72 100644 --- a/c++/test/dsets.cpp +++ b/c++/test/dsets.cpp @@ -1451,7 +1451,7 @@ test_read_string(H5File &file) *------------------------------------------------------------------------- */ extern "C" void -test_dset() +test_dset(const void *params) { hid_t fapl_id; fapl_id = h5_fileaccess(); // in h5test.c, returns a file access template @@ -1492,9 +1492,6 @@ test_dset() catch (Exception &E) { test_report(nerrors, H5std_string(" Dataset")); } - - // Clean up data file - cleanup_dsets(); } // test_dset /*------------------------------------------------------------------------- @@ -1506,8 +1503,10 @@ test_dset() *------------------------------------------------------------------------- */ extern "C" void -cleanup_dsets() +cleanup_dsets(void *params) { - HDremove(FILE1.c_str()); - HDremove(FILE_ACCPLIST.c_str()); + if (GetTestCleanup()) { + HDremove(FILE1.c_str()); + HDremove(FILE_ACCPLIST.c_str()); + } } // cleanup_dsets diff --git a/c++/test/h5cpputil.h b/c++/test/h5cpputil.h index fa6822ad172..6c8560d5c8d 100644 --- 
a/c++/test/h5cpputil.h +++ b/c++/test/h5cpputil.h @@ -22,6 +22,7 @@ #define H5cpputil_H #include "h5test.h" +#include "testframe.h" using namespace H5; using std::cerr; @@ -29,8 +30,8 @@ using std::endl; #define MESSAGE(V, A) \ do { \ - if (HDGetTestVerbosity() > (V)) \ - print_func A; \ + if (GetTestVerbosity() > (V)) \ + printf A; \ } while (0) #define SUBTEST(TEST) \ do { \ @@ -142,36 +143,36 @@ verify_val(Type1 x, Type2 value, float epsilon, const char *msg, int line, const #ifdef __cplusplus extern "C" { #endif -void test_array(); -void test_attr(); -void test_compound(); -void test_dsproplist(); -void test_file(); -void test_filters(); -void test_links(); -void test_h5s(); -void test_iterate(); -void test_object(); -void test_reference(); -void test_types(); -void test_vlstrings(); -void test_dset(); +void test_array(const void *params); +void test_attr(const void *params); +void test_compound(const void *params); +void test_dsproplist(const void *params); +void test_file(const void *params); +void test_filters(const void *params); +void test_links(const void *params); +void test_h5s(const void *params); +void test_iterate(const void *params); +void test_object(const void *params); +void test_reference(const void *params); +void test_types(const void *params); +void test_vlstrings(const void *params); +void test_dset(const void *params); /* Prototypes for the cleanup routines */ -void cleanup_array(); -void cleanup_attr(); -void cleanup_compound(); -void cleanup_dsproplist(); -void cleanup_dsets(); -void cleanup_file(); -void cleanup_filters(); -void cleanup_h5s(); -void cleanup_iterate(); -void cleanup_links(); -void cleanup_object(); -void cleanup_reference(); -void cleanup_types(); -void cleanup_vlstrings(); +void cleanup_array(void *params); +void cleanup_attr(void *params); +void cleanup_compound(void *params); +void cleanup_dsproplist(void *params); +void cleanup_dsets(void *params); +void cleanup_file(void *params); +void cleanup_filters(void *params); +void cleanup_h5s(void *params); +void cleanup_iterate(void *params); +void cleanup_links(void *params); +void cleanup_object(void *params); +void cleanup_reference(void *params); +void cleanup_types(void *params); +void cleanup_vlstrings(void *params); #ifdef __cplusplus } diff --git a/c++/test/tarray.cpp b/c++/test/tarray.cpp index 2d14264f006..3ee94d24f59 100644 --- a/c++/test/tarray.cpp +++ b/c++/test/tarray.cpp @@ -477,7 +477,7 @@ test_array_info() *------------------------------------------------------------------------- */ extern "C" void -test_array() +test_array(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Array Datatypes\n")); @@ -502,7 +502,9 @@ test_array() *------------------------------------------------------------------------- */ extern "C" void -cleanup_array() +cleanup_array(void *params) { - HDremove(FILENAME.c_str()); + if (GetTestCleanup()) { + HDremove(FILENAME.c_str()); + } } // cleanup_array diff --git a/c++/test/tattr.cpp b/c++/test/tattr.cpp index 5135e0c7fa7..07ab0a6d777 100644 --- a/c++/test/tattr.cpp +++ b/c++/test/tattr.cpp @@ -1956,7 +1956,7 @@ test_attr_corder_create_basic(FileCreatPropList &fcpl, FileAccPropList &fapl) *------------------------------------------------------------------------- */ extern "C" void -test_attr() +test_attr(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Attributes\n")); @@ -2041,12 +2041,14 @@ test_attr() *------------------------------------------------------------------------- */ 
extern "C" void -cleanup_attr() +cleanup_attr(void *params) { - HDremove(FILE_BASIC.c_str()); - HDremove(FILE_COMPOUND.c_str()); - HDremove(FILE_SCALAR.c_str()); - HDremove(FILE_MULTI.c_str()); - HDremove(FILE_DTYPE.c_str()); - HDremove(FILE_CRTPROPS.c_str()); + if (GetTestCleanup()) { + HDremove(FILE_BASIC.c_str()); + HDremove(FILE_COMPOUND.c_str()); + HDremove(FILE_SCALAR.c_str()); + HDremove(FILE_MULTI.c_str()); + HDremove(FILE_DTYPE.c_str()); + HDremove(FILE_CRTPROPS.c_str()); + } } diff --git a/c++/test/tcompound.cpp b/c++/test/tcompound.cpp index 53939dd1eb8..dada2c075a8 100644 --- a/c++/test/tcompound.cpp +++ b/c++/test/tcompound.cpp @@ -729,7 +729,7 @@ test_compound_set_size() *------------------------------------------------------------------------- */ extern "C" void -test_compound() +test_compound(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Compound Data Type operations\n")); @@ -753,7 +753,9 @@ test_compound() *------------------------------------------------------------------------- */ extern "C" void -cleanup_compound() +cleanup_compound(void *params) { - HDremove(COMPFILE.c_str()); + if (GetTestCleanup()) { + HDremove(COMPFILE.c_str()); + } } // cleanup_file diff --git a/c++/test/tdspl.cpp b/c++/test/tdspl.cpp index bccc41a568f..90b558fdbbf 100644 --- a/c++/test/tdspl.cpp +++ b/c++/test/tdspl.cpp @@ -114,7 +114,7 @@ test_transfplist() *------------------------------------------------------------------------- */ extern "C" void -test_dsproplist() +test_dsproplist(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Generic Dataset Property Lists\n")); @@ -132,7 +132,9 @@ test_dsproplist() *------------------------------------------------------------------------- */ extern "C" void -cleanup_dsproplist() +cleanup_dsproplist(void *params) { - HDremove(FILENAME.c_str()); + if (GetTestCleanup()) { + HDremove(FILENAME.c_str()); + } } diff --git a/c++/test/testhdf5.cpp b/c++/test/testhdf5.cpp index 518227dd26c..1df73baaf19 100644 --- a/c++/test/testhdf5.cpp +++ b/c++/test/testhdf5.cpp @@ -32,8 +32,6 @@ PerformTests() -- Perform requested testing GetTestSummary() -- Retrieve Summary request value TestSummary() -- Display test summary - GetTestCleanup() -- Retrieve Cleanup request value - TestCleanup() -- Clean up files from testing GetTestNumErrs() -- Retrieve the number of testing errors ***************************************************************************/ @@ -57,42 +55,42 @@ main(int argc, char *argv[]) // caused deliberately and expected. 
Exception::dontPrint(); /* Initialize testing framework */ - TestInit(argv[0], NULL, NULL); + TestInit(argv[0], NULL, NULL, NULL, NULL, 0); // testing file creation and opening in tfile.cpp - AddTest("tfile", test_file, cleanup_file, "File I/O Operations", NULL); + AddTest("tfile", test_file, NULL, cleanup_file, NULL, 0, "File I/O Operations"); // testing dataset functionalities in dset.cpp - AddTest("dsets", test_dset, cleanup_dsets, "Dataset I/O Operations", NULL); + AddTest("dsets", test_dset, NULL, cleanup_dsets, NULL, 0, "Dataset I/O Operations"); // testing dataspace functionalities in th5s.cpp - AddTest("th5s", test_h5s, cleanup_h5s, "Dataspaces", NULL); + AddTest("th5s", test_h5s, NULL, cleanup_h5s, NULL, 0, "Dataspaces"); // testing attribute functionalities in tattr.cpp - AddTest("tattr", test_attr, cleanup_attr, "Attributes", NULL); + AddTest("tattr", test_attr, NULL, cleanup_attr, NULL, 0, "Attributes"); // testing object functionalities in tobject.cpp - AddTest("tobject", test_object, cleanup_object, "Objects", NULL); + AddTest("tobject", test_object, NULL, cleanup_object, NULL, 0, "Objects"); // testing reference functionalities in trefer.cpp - AddTest("trefer", test_reference, cleanup_reference, "References", NULL); + AddTest("trefer", test_reference, NULL, cleanup_reference, NULL, 0, "References"); // testing variable-length strings in tvlstr.cpp - AddTest("tvlstr", test_vlstrings, cleanup_vlstrings, "Variable-Length Strings", NULL); - AddTest("ttypes", test_types, cleanup_types, "Generic Data Types", NULL); - AddTest("tarray", test_array, cleanup_array, "Array Datatypes", NULL); - AddTest("tcompound", test_compound, cleanup_compound, "Compound Data Types", NULL); - AddTest("tdspl", test_dsproplist, cleanup_dsproplist, "Dataset Property List", NULL); - AddTest("tfilter", test_filters, cleanup_filters, "Various Filters", NULL); - AddTest("tlinks", test_links, cleanup_links, "Various Links", NULL); + AddTest("tvlstr", test_vlstrings, NULL, cleanup_vlstrings, NULL, 0, "Variable-Length Strings"); + AddTest("ttypes", test_types, NULL, cleanup_types, NULL, 0, "Generic Data Types"); + AddTest("tarray", test_array, NULL, cleanup_array, NULL, 0, "Array Datatypes"); + AddTest("tcompound", test_compound, NULL, cleanup_compound, NULL, 0, "Compound Data Types"); + AddTest("tdspl", test_dsproplist, NULL, cleanup_dsproplist, NULL, 0, "Dataset Property List"); + AddTest("tfilter", test_filters, NULL, cleanup_filters, NULL, 0, "Various Filters"); + AddTest("tlinks", test_links, NULL, cleanup_links, NULL, 0, "Various Links"); /* Comment out tests that are not done yet. 
- BMR, Feb 2001 - AddTest("select", test_select, cleanup_select, "Selections", NULL); - AddTest("time", test_time, cleanup_time, "Time Datatypes", NULL); - AddTest("vltypes", test_vltypes, cleanup_vltypes, "Variable-Length Datatypes", NULL); + AddTest("select", test_select, NULL, cleanup_select, NULL, 0, "Selections"); + AddTest("time", test_time, NULL, cleanup_time, NULL, 0, "Time Datatypes"); + AddTest("vltypes", test_vltypes, NULL, cleanup_vltypes, NULL, 0, "Variable-Length Datatypes"); */ - AddTest("iterate", test_iterate, cleanup_iterate, "Group & Attribute Iteration", NULL); + AddTest("iterate", test_iterate, NULL, cleanup_iterate, NULL, 0, "Group & Attribute Iteration"); /* - AddTest("genprop", test_genprop, cleanup_genprop, "Generic Properties", NULL); - AddTest("id", test_ids, NULL, "User-Created Identifiers", NULL); + AddTest("genprop", test_genprop, NULL, cleanup_genprop, NULL, 0, "Generic Properties"); + AddTest("id", test_ids, NULL, NULL, NULL, 0, "User-Created Identifiers"); Comment out tests that are not done yet */ /* Tentative - BMR 2007/1/12 - AddTest("enum", test_enum, cleanup_enum, "Enum Data Types", NULL); + AddTest("enum", test_enum, NULL, cleanup_enum, NULL, 0, "Enum Data Types"); */ } catch (Exception &E) { @@ -100,7 +98,7 @@ main(int argc, char *argv[]) } /* Display testing information */ - TestInfo(argv[0]); + TestInfo(stdout); /* Parse command line arguments */ TestParseCmdLine(argc, argv); @@ -110,11 +108,7 @@ main(int argc, char *argv[]) /* Display test summary, if requested */ if (GetTestSummary()) - TestSummary(); - - /* Clean up test files, if allowed */ - if (GetTestCleanup() && !getenv(HDF5_NOCLEANUP)) - TestCleanup(); + TestSummary(stdout); /* Release test infrastructure */ TestShutdown(); diff --git a/c++/test/tfile.cpp b/c++/test/tfile.cpp index 212a241a3d9..bd7d96422e8 100644 --- a/c++/test/tfile.cpp +++ b/c++/test/tfile.cpp @@ -968,7 +968,7 @@ test_file_info() *------------------------------------------------------------------------- */ extern "C" void -test_file() +test_file(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing File I/O Operations\n")); @@ -996,13 +996,15 @@ test_file() extern "C" #endif void - cleanup_file() + cleanup_file(void *params) { - HDremove(FILE1.c_str()); - HDremove(FILE2.c_str()); - HDremove(FILE3.c_str()); - HDremove(FILE4.c_str()); - HDremove(FILE5.c_str()); - HDremove(FILE6.c_str()); - HDremove(FILE7.c_str()); + if (GetTestCleanup()) { + HDremove(FILE1.c_str()); + HDremove(FILE2.c_str()); + HDremove(FILE3.c_str()); + HDremove(FILE4.c_str()); + HDremove(FILE5.c_str()); + HDremove(FILE6.c_str()); + HDremove(FILE7.c_str()); + } } // cleanup_file diff --git a/c++/test/tfilter.cpp b/c++/test/tfilter.cpp index 26be0681c2d..d66a7b5894c 100644 --- a/c++/test/tfilter.cpp +++ b/c++/test/tfilter.cpp @@ -227,7 +227,7 @@ test_szip_filter(H5File &file1) */ const H5std_string FILE1("tfilters.h5"); extern "C" void -test_filters() +test_filters(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Various Filters\n")); @@ -259,7 +259,9 @@ test_filters() *------------------------------------------------------------------------- */ extern "C" void -cleanup_filters() +cleanup_filters(void *params) { - HDremove(FILE1.c_str()); + if (GetTestCleanup()) { + HDremove(FILE1.c_str()); + } } diff --git a/c++/test/th5s.cpp b/c++/test/th5s.cpp index 04bc3c0b188..34fb32d4cea 100644 --- a/c++/test/th5s.cpp +++ b/c++/test/th5s.cpp @@ -471,7 +471,7 @@ test_h5s_compound_scalar_read() 
*------------------------------------------------------------------------- */ extern "C" void -test_h5s() +test_h5s(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Dataspaces\n")); @@ -493,7 +493,9 @@ test_h5s() *------------------------------------------------------------------------- */ extern "C" void -cleanup_h5s() +cleanup_h5s(void *params) { - HDremove(DATAFILE.c_str()); + if (GetTestCleanup()) { + HDremove(DATAFILE.c_str()); + } } // cleanup_h5s diff --git a/c++/test/titerate.cpp b/c++/test/titerate.cpp index b9196d43976..2f989ae40f0 100644 --- a/c++/test/titerate.cpp +++ b/c++/test/titerate.cpp @@ -30,17 +30,17 @@ using namespace H5; #define NDATASETS 50 /* Number of attributes for attribute iteration test */ -//#define NATTR 50 +// #define NATTR 50 /* Number of groups for second group iteration test */ -//#define ITER_NGROUPS 150 +// #define ITER_NGROUPS 150 /* General maximum length of names used */ #define NAMELEN 80 /* 1-D dataset with fixed dimensions */ -//#define SPACE1_RANK 1 -//#define SPACE1_DIM1 4 +// #define SPACE1_RANK 1 +// #define SPACE1_DIM1 4 const H5std_string FILE_ITERATE("titerate.h5"); const H5std_string GROUP1("Top Group"); @@ -440,7 +440,7 @@ test_HDFFV_9920() *------------------------------------------------------------------------- */ extern "C" void -test_iterate() +test_iterate(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Iterate Feature\n")); @@ -464,8 +464,10 @@ test_iterate() *------------------------------------------------------------------------- */ extern "C" void -cleanup_iterate() +cleanup_iterate(void *params) { - HDremove(FILE_ITERATE.c_str()); - HDremove(FILE_NAME.c_str()); + if (GetTestCleanup()) { + HDremove(FILE_ITERATE.c_str()); + HDremove(FILE_NAME.c_str()); + } } // cleanup_iterate diff --git a/c++/test/tlinks.cpp b/c++/test/tlinks.cpp index 3ce8c6823d3..a6d68631fc4 100644 --- a/c++/test/tlinks.cpp +++ b/c++/test/tlinks.cpp @@ -709,7 +709,7 @@ test_visit(hid_t fapl_id, hbool_t new_format) *------------------------------------------------------------------------- */ extern "C" void -test_links() +test_links(const void *params) { hid_t fapl_id, fapl2_id; /* File access property lists */ unsigned new_format; /* Whether to use the new format or not */ @@ -769,8 +769,10 @@ test_links() *------------------------------------------------------------------------- */ extern "C" void -cleanup_links() +cleanup_links(void *params) { - HDremove(FILENAME[0]); - HDremove(FILENAME[1]); + if (GetTestCleanup()) { + HDremove(FILENAME[0]); + HDremove(FILENAME[1]); + } } diff --git a/c++/test/tobject.cpp b/c++/test/tobject.cpp index 0affed1f838..cec13323544 100644 --- a/c++/test/tobject.cpp +++ b/c++/test/tobject.cpp @@ -718,7 +718,7 @@ test_intermediate_groups() *------------------------------------------------------------------------- */ extern "C" void -test_object() +test_object(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Object Functions\n")); @@ -742,10 +742,12 @@ test_object() *------------------------------------------------------------------------- */ extern "C" void -cleanup_object() +cleanup_object(void *params) { - HDremove(FILE_OBJECTS.c_str()); - HDremove(FILE_OBJHDR.c_str()); - HDremove(FILE_OBJINFO.c_str()); - HDremove(FILE_INTERGRPS.c_str()); + if (GetTestCleanup()) { + HDremove(FILE_OBJECTS.c_str()); + HDremove(FILE_OBJHDR.c_str()); + HDremove(FILE_OBJINFO.c_str()); + HDremove(FILE_INTERGRPS.c_str()); + } } // 
cleanup_objects diff --git a/c++/test/trefer.cpp b/c++/test/trefer.cpp index 31c50229a1e..627330f4660 100644 --- a/c++/test/trefer.cpp +++ b/c++/test/trefer.cpp @@ -827,7 +827,7 @@ test_reference_region_1D() *------------------------------------------------------------------------- */ extern "C" void -test_reference() +test_reference(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing References\n")); @@ -848,8 +848,10 @@ test_reference() *------------------------------------------------------------------------- */ extern "C" void -cleanup_reference() +cleanup_reference(void *params) { - HDremove(FILE1.c_str()); - HDremove(FILE2.c_str()); + if (GetTestCleanup()) { + HDremove(FILE1.c_str()); + HDremove(FILE2.c_str()); + } } diff --git a/c++/test/ttypes.cpp b/c++/test/ttypes.cpp index 322b72f444e..c504635b6ca 100644 --- a/c++/test/ttypes.cpp +++ b/c++/test/ttypes.cpp @@ -1093,7 +1093,7 @@ test_operators() *------------------------------------------------------------------------- */ extern "C" void -test_types() +test_types(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Generic Data Types\n")); @@ -1120,8 +1120,10 @@ test_types() *------------------------------------------------------------------------- */ extern "C" void -cleanup_types() +cleanup_types(void *params) { - for (int i = 0; i < 6; i++) - HDremove(FILENAME[i]); + if (GetTestCleanup()) { + for (int i = 0; i < 6; i++) + HDremove(FILENAME[i]); + } } // cleanup_types diff --git a/c++/test/tvlstr.cpp b/c++/test/tvlstr.cpp index 33710a3ba17..ba39620334a 100644 --- a/c++/test/tvlstr.cpp +++ b/c++/test/tvlstr.cpp @@ -934,7 +934,7 @@ test_vl_rewrite() *------------------------------------------------------------------------- */ extern "C" void -test_vlstrings() +test_vlstrings(const void *params) { // Output message about test being performed MESSAGE(5, ("Testing Variable-Length Strings")); @@ -967,8 +967,10 @@ test_vlstrings() *------------------------------------------------------------------------- */ extern "C" void -cleanup_vlstrings() +cleanup_vlstrings(void *params) { - HDremove(FILENAME.c_str()); - HDremove(FILENAME2.c_str()); + if (GetTestCleanup()) { + HDremove(FILENAME.c_str()); + HDremove(FILENAME2.c_str()); + } } diff --git a/config/cmake/H5pubconf.h.in b/config/cmake/H5pubconf.h.in index c3e92eba66b..9f0d0bc1505 100644 --- a/config/cmake/H5pubconf.h.in +++ b/config/cmake/H5pubconf.h.in @@ -607,6 +607,9 @@ /* Define using v1.16 public API symbols by default */ #cmakedefine H5_USE_116_API_DEFAULT @H5_USE_116_API_DEFAULT@ +/* Define using v1.18 public API symbols by default */ +#cmakedefine H5_USE_118_API_DEFAULT @H5_USE_118_API_DEFAULT@ + /* Define if the library will use file locking */ #cmakedefine H5_USE_FILE_LOCKING @H5_USE_FILE_LOCKING@ diff --git a/config/cmake/HDF5ExampleCache.cmake b/config/cmake/HDF5ExampleCache.cmake index 99232cc06ca..219d1c59902 100644 --- a/config/cmake/HDF5ExampleCache.cmake +++ b/config/cmake/HDF5ExampleCache.cmake @@ -38,6 +38,8 @@ elseif (DEFAULT_API_VERSION MATCHES "v114") set (H5_USE_114_API ON) elseif (DEFAULT_API_VERSION MATCHES "v116") set (H5_USE_116_API ON) +elseif (DEFAULT_API_VERSION MATCHES "v118") + set (H5_USE_118_API ON) endif () message (STATUS "HDF5 H5_LIBVER_DIR: ${H5_LIBVER_DIR} HDF5_API_VERSION: ${DEFAULT_API_VERSION}") diff --git a/config/cmake/LIBAEC/CMakeLists.txt b/config/cmake/LIBAEC/CMakeLists.txt index 5d978275565..2ac4f2c7ad3 100644 --- a/config/cmake/LIBAEC/CMakeLists.txt +++ 
b/config/cmake/LIBAEC/CMakeLists.txt @@ -121,21 +121,6 @@ if (WIN32) add_compile_definitions (_CONSOLE) endif () -#----------------------------------------------------------------------------- -# Compiler specific flags : Shouldn't there be compiler tests for these -#----------------------------------------------------------------------------- -if (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") - set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}") -endif () - -#----------------------------------------------------------------------------- -# This is in here to help some of the GCC based IDES like Eclipse -# and code blocks parse the compiler errors and warnings better. -#----------------------------------------------------------------------------- -if (CMAKE_C_COMPILER_ID STREQUAL "GNU") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fmessage-length=0") -endif () - #----------------------------------------------------------------------------- # Generate the aec_config.h file containing user settings needed by compilation #----------------------------------------------------------------------------- @@ -166,6 +151,12 @@ target_include_directories (${LIBAEC_LIB_TARGET} PUBLIC "$" "$" "$") +if (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + target_compile_options(${LIBAEC_LIB_TARGET} PRIVATE -Wno-deprecated-non-prototype) +endif () +if (CMAKE_C_COMPILER_ID STREQUAL "GNU") + target_compile_options(${LIBAEC_LIB_TARGET} PRIVATE -fmessage-length=0) +endif () TARGET_C_PROPERTIES (${LIBAEC_LIB_TARGET} STATIC) H5_SET_LIB_OPTIONS (${LIBAEC_LIB_TARGET} ${LIBAEC_LIB_NAME} STATIC 0) set_target_properties (${LIBAEC_LIB_TARGET} PROPERTIES diff --git a/config/cmake/examples/CTestScript.cmake b/config/cmake/examples/CTestScript.cmake index b1bfa8a9fc1..5e7259d7cf9 100644 --- a/config/cmake/examples/CTestScript.cmake +++ b/config/cmake/examples/CTestScript.cmake @@ -11,7 +11,7 @@ # cmake_minimum_required (VERSION 3.18) ######################################################## -# For any comments please contact cdashhelp@hdfgroup.org +# For any comments please contact help@hdfgroup.org # ######################################################## # ----------------------------------------------------------- @@ -22,16 +22,19 @@ if (NOT SITE_OS_NAME) ## -- set hostname ## -------------------------- find_program (HOSTNAME_CMD NAMES hostname) - exec_program (${HOSTNAME_CMD} ARGS OUTPUT_VARIABLE HOSTNAME) + execute_process (COMMAND ${HOSTNAME_CMD} OUTPUT_VARIABLE HOSTNAME OUTPUT_STRIP_TRAILING_WHITESPACE) set (CTEST_SITE "${HOSTNAME}${CTEST_SITE_EXT}") find_program (UNAME NAMES uname) macro (getuname name flag) - exec_program ("${UNAME}" ARGS "${flag}" OUTPUT_VARIABLE "${name}") + execute_process (COMMAND "${UNAME}" "${flag}" OUTPUT_VARIABLE "${name}" OUTPUT_STRIP_TRAILING_WHITESPACE) endmacro () getuname (osname -s) + string(STRIP ${osname} osname) getuname (osrel -r) + string(STRIP ${osrel} osrel) getuname (cpu -m) + string(STRIP ${cpu} cpu) message (STATUS "Dashboard script uname output: ${osname}-${osrel}-${cpu}\n") set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}") @@ -50,15 +53,15 @@ endif () set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}") # Launchers work only with Makefile and Ninja generators. 
-if(NOT "${CTEST_CMAKE_GENERATOR}" MATCHES "Make|Ninja" OR LOCAL_SKIP_TEST) - set(CTEST_USE_LAUNCHERS 0) - set(ENV{CTEST_USE_LAUNCHERS_DEFAULT} 0) - set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=OFF") -else() - set(CTEST_USE_LAUNCHERS 1) - set(ENV{CTEST_USE_LAUNCHERS_DEFAULT} 1) - set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON") -endif() +if (NOT "${CTEST_CMAKE_GENERATOR}" MATCHES "Make|Ninja" OR LOCAL_SKIP_TEST) + set (CTEST_USE_LAUNCHERS 0) + set (ENV{CTEST_USE_LAUNCHERS_DEFAULT} 0) + set (BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=OFF") +else () + set (CTEST_USE_LAUNCHERS 1) + set (ENV{CTEST_USE_LAUNCHERS_DEFAULT} 1) + set (BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON") +endif () #----------------------------------------------------------------------------- # MacOS machines need special options @@ -77,7 +80,6 @@ endif () set (NEED_REPOSITORY_CHECKOUT 0) set (CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"") if (CTEST_USE_TAR_SOURCE) - ## Uncompress source if tar or zip file provided ## -------------------------- if (WIN32 AND NOT MINGW) message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip]") @@ -98,10 +100,10 @@ endif () ## Clear the build directory ## -------------------------- set (CTEST_START_WITH_EMPTY_BINARY_DIRECTORY TRUE) -if (EXISTS "${CTEST_BINARY_DIRECTORY}" AND IS_DIRECTORY "${CTEST_BINARY_DIRECTORY}") - ctest_empty_binary_directory (${CTEST_BINARY_DIRECTORY}) -else () +if (NOT EXISTS "${CTEST_BINARY_DIRECTORY}") file (MAKE_DIRECTORY "${CTEST_BINARY_DIRECTORY}") +else () + ctest_empty_binary_directory (${CTEST_BINARY_DIRECTORY}) endif () # Use multiple CPU cores to build @@ -122,13 +124,15 @@ endif () #----------------------------------------------------------------------------- # Initialize the CTEST commands #------------------------------ -set(CTEST_CONFIGURE_TOOLSET "") -if(CMAKE_GENERATOR_TOOLSET) - set(CTEST_CONFIGURE_TOOLSET "-T${CMAKE_GENERATOR_TOOLSET}") +if (CMAKE_GENERATOR_TOOLSET) + set (CTEST_CONFIGURE_TOOLSET "\"-T${CMAKE_GENERATOR_TOOLSET}\"") +else () + set (CTEST_CONFIGURE_TOOLSET) endif() -set(CTEST_CONFIGURE_ARCHITECTURE "") -if(CMAKE_GENERATOR_ARCHITECTURE) - set(CTEST_CONFIGURE_ARCHITECTURE "-A${CMAKE_GENERATOR_ARCHITECTURE}") +if (CMAKE_GENERATOR_ARCHITECTURE) + set (CTEST_CONFIGURE_ARCHITECTURE "\"-A${CMAKE_GENERATOR_ARCHITECTURE}\"") +else () + set (CTEST_CONFIGURE_ARCHITECTURE) endif() set (CTEST_CONFIGURE_COMMAND "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_CONFIGURE_ARCHITECTURE}\" \"${CTEST_CONFIGURE_TOOLSET}\" \"${CTEST_SOURCE_DIRECTORY}\"" diff --git a/config/cmake/examples/HDF5_Examples.cmake.in b/config/cmake/examples/HDF5_Examples.cmake.in index 962bfea147a..c929df7b802 100644 --- a/config/cmake/examples/HDF5_Examples.cmake.in +++ b/config/cmake/examples/HDF5_Examples.cmake.in @@ -114,7 +114,7 @@ set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PAC #endif() ############################################################################################################### -# For any comments please contact cdashhelp@hdfgroup.org +# For any comments please contact help@hdfgroup.org # ############################################################################################################### diff --git 
a/config/cmake/scripts/CTestScript.cmake b/config/cmake/scripts/CTestScript.cmake index 46037f573b1..45ca77b73d9 100644 --- a/config/cmake/scripts/CTestScript.cmake +++ b/config/cmake/scripts/CTestScript.cmake @@ -11,8 +11,7 @@ # cmake_minimum_required (VERSION 3.18) ######################################################## -# This dashboard is maintained by The HDF Group -# For any comments please contact cdashhelp@hdfgroup.org +# For any comments please contact help@hdfgroup.org # ######################################################## # ----------------------------------------------------------- @@ -23,16 +22,19 @@ if (NOT SITE_OS_NAME) ## -- set hostname ## -------------------------- find_program (HOSTNAME_CMD NAMES hostname) - exec_program (${HOSTNAME_CMD} ARGS OUTPUT_VARIABLE HOSTNAME) + execute_process (COMMAND ${HOSTNAME_CMD} OUTPUT_VARIABLE HOSTNAME OUTPUT_STRIP_TRAILING_WHITESPACE) set (CTEST_SITE "${HOSTNAME}${CTEST_SITE_EXT}") find_program (UNAME NAMES uname) macro (getuname name flag) - exec_program ("${UNAME}" ARGS "${flag}" OUTPUT_VARIABLE "${name}") + execute_process (COMMAND "${UNAME}" "${flag}" OUTPUT_VARIABLE "${name}" OUTPUT_STRIP_TRAILING_WHITESPACE) endmacro () getuname (osname -s) + string(STRIP ${osname} osname) getuname (osrel -r) + string(STRIP ${osrel} osrel) getuname (cpu -m) + string(STRIP ${cpu} cpu) message (STATUS "Dashboard script uname output: ${osname}-${osrel}-${cpu}\n") set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}") @@ -62,7 +64,7 @@ else () endif () #----------------------------------------------------------------------------- -# MAC machines need special option +# MacOS machines need special options #----------------------------------------------------------------------------- if (APPLE) # Compiler choice @@ -78,7 +80,6 @@ endif () set (NEED_REPOSITORY_CHECKOUT 0) set (CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"") if (CTEST_USE_TAR_SOURCE) - ## Uncompress source if tar file provided ## -------------------------- if (WIN32 AND NOT MINGW) message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip]") diff --git a/config/cmake/scripts/HDF5config.cmake b/config/cmake/scripts/HDF5config.cmake index 6ec34815e9f..485871c5816 100644 --- a/config/cmake/scripts/HDF5config.cmake +++ b/config/cmake/scripts/HDF5config.cmake @@ -20,6 +20,7 @@ cmake_minimum_required (VERSION 3.18) # Usage: # ctest -S HDF5config.cmake,OPTION=VALUE -C Release -VV -O test.log # where valid options for OPTION are: +# NINJA - Use Ninja build system # BUILD_GENERATOR - The cmake build generator: # MinGW * MinGW Makefiles # Unix * Unix Makefiles @@ -37,15 +38,15 @@ cmake_minimum_required (VERSION 3.18) # CTEST_SOURCE_NAME - source folder ############################################################################## -set (CTEST_SOURCE_VERSION "1.15.0") +set (CTEST_SOURCE_VERSION "1.17.0") set (CTEST_SOURCE_VERSEXT "") ############################################################################## # handle input parameters to script. 
#BUILD_GENERATOR - which CMake generator to use, required -#INSTALLDIR - HDF5-1.15.x root folder +#INSTALLDIR - HDF5-1.17.x root folder #CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo -#CTEST_SOURCE_NAME - name of source folder; HDF5-1.15.x +#CTEST_SOURCE_NAME - name of source folder; HDF5-1.17.x #MODEL - CDash group name #HPC - run alternate configurations for HPC machines; sbatch, bsub, raybsub, qsub #MPI - enable MPI @@ -111,85 +112,89 @@ if (NOT DEFINED HPC) set (SITE_OS_NAME "Windows") set (SITE_OS_VERSION "WIN10") if (BUILD_GENERATOR STREQUAL "VS202264") - set (CTEST_CMAKE_GENERATOR "Visual Studio 17 2022") - set (CMAKE_GENERATOR_ARCHITECTURE "x64") + if (DEFINED NINJA) + set (CTEST_CMAKE_GENERATOR "Ninja") + else () + set (CTEST_CMAKE_GENERATOR "Visual Studio 17 2022") + set (CMAKE_GENERATOR_ARCHITECTURE "x64") + endif () set (SITE_OS_BITS "64") set (SITE_COMPILER_NAME "vs2022") set (SITE_COMPILER_VERSION "17") elseif (BUILD_GENERATOR STREQUAL "VS2022") - set (CTEST_CMAKE_GENERATOR "Visual Studio 17 2022") - set (CMAKE_GENERATOR_ARCHITECTURE "Win32") + if (DEFINED NINJA) + set (CTEST_CMAKE_GENERATOR "Ninja") + else () + set (CTEST_CMAKE_GENERATOR "Visual Studio 17 2022") + set (CMAKE_GENERATOR_ARCHITECTURE "Win32") + endif () set (SITE_OS_BITS "32") set (SITE_COMPILER_NAME "vs2022") set (SITE_COMPILER_VERSION "17") elseif (BUILD_GENERATOR STREQUAL "VS201964") - set (CTEST_CMAKE_GENERATOR "Visual Studio 16 2019") - set (CMAKE_GENERATOR_ARCHITECTURE "x64") + if (DEFINED NINJA) + set (CTEST_CMAKE_GENERATOR "Ninja") + else () + set (CTEST_CMAKE_GENERATOR "Visual Studio 16 2019") + set (CMAKE_GENERATOR_ARCHITECTURE "x64") + endif () set (SITE_OS_BITS "64") set (SITE_COMPILER_NAME "vs2019") set (SITE_COMPILER_VERSION "16") elseif (BUILD_GENERATOR STREQUAL "VS2019") - set (CTEST_CMAKE_GENERATOR "Visual Studio 16 2019") - set (CMAKE_GENERATOR_ARCHITECTURE "Win32") + if (DEFINED NINJA) + set (CTEST_CMAKE_GENERATOR "Ninja") + else () + set (CTEST_CMAKE_GENERATOR "Visual Studio 16 2019") + set (CMAKE_GENERATOR_ARCHITECTURE "Win32") + endif () set (SITE_OS_BITS "32") set (SITE_COMPILER_NAME "vs2019") set (SITE_COMPILER_VERSION "16") elseif (BUILD_GENERATOR STREQUAL "VS201764") - set (CTEST_CMAKE_GENERATOR "Visual Studio 15 2017 Win64") + if (DEFINED NINJA) + set (CTEST_CMAKE_GENERATOR "Ninja") + else () + set (CTEST_CMAKE_GENERATOR "Visual Studio 15 2017 Win64") + endif () set (SITE_OS_BITS "64") set (SITE_COMPILER_NAME "vs2017") set (SITE_COMPILER_VERSION "15") elseif (BUILD_GENERATOR STREQUAL "VS2017") - set (CTEST_CMAKE_GENERATOR "Visual Studio 15 2017") + if (DEFINED NINJA) + set (CTEST_CMAKE_GENERATOR "Ninja") + else () + set (CTEST_CMAKE_GENERATOR "Visual Studio 15 2017") + endif () set (SITE_OS_BITS "32") set (SITE_COMPILER_NAME "vs2017") set (SITE_COMPILER_VERSION "15") - elseif (BUILD_GENERATOR STREQUAL "VS201564") - set (CTEST_CMAKE_GENERATOR "Visual Studio 14 2015 Win64") - set (SITE_OS_BITS "64") - set (SITE_COMPILER_NAME "vs2015") - set (SITE_COMPILER_VERSION "14") - elseif (BUILD_GENERATOR STREQUAL "VS2015") - set (CTEST_CMAKE_GENERATOR "Visual Studio 14 2015") - set (SITE_OS_BITS "32") - set (SITE_COMPILER_NAME "vs2015") - set (SITE_COMPILER_VERSION "14") - elseif (BUILD_GENERATOR STREQUAL "VS201364") - set (CTEST_CMAKE_GENERATOR "Visual Studio 12 2013 Win64") - set (SITE_OS_BITS "64") - set (SITE_COMPILER_NAME "vs2013") - set (SITE_COMPILER_VERSION "12") - elseif (BUILD_GENERATOR STREQUAL "VS2013") - set (CTEST_CMAKE_GENERATOR "Visual Studio 12 2013") - set 
(SITE_OS_BITS "32") - set (SITE_COMPILER_NAME "vs2013") - set (SITE_COMPILER_VERSION "12") - elseif (BUILD_GENERATOR STREQUAL "VS201264") - set (CTEST_CMAKE_GENERATOR "Visual Studio 11 2012 Win64") - set (SITE_OS_BITS "64") - set (SITE_COMPILER_NAME "vs2012") - set (SITE_COMPILER_VERSION "11") - elseif (BUILD_GENERATOR STREQUAL "VS2012") - set (CTEST_CMAKE_GENERATOR "Visual Studio 11 2012") - set (SITE_OS_BITS "32") - set (SITE_COMPILER_NAME "vs2012") - set (SITE_COMPILER_VERSION "11") else () message (FATAL_ERROR "Invalid BUILD_GENERATOR must be - Unix, VS2022, VS202264, VS2019, VS201964") endif () ## Set the following to unique id your computer ## - set (CTEST_SITE "WIN10${BUILD_GENERATOR}.XXXX") + if(NOT DEFINED CTEST_SITE) + set (CTEST_SITE "WIN10${BUILD_GENERATOR}-${CTEST_SITE_EXT}") + endif() else () - if (MINGW) - set (CTEST_CMAKE_GENERATOR "MinGW Makefiles") + if (DEFINED NINJA) + set (CTEST_CMAKE_GENERATOR "Ninja") else () - set (CTEST_CMAKE_GENERATOR "Unix Makefiles") + if (MINGW) + set (CTEST_CMAKE_GENERATOR "MinGW Makefiles") + else () + set (CTEST_CMAKE_GENERATOR "Unix Makefiles") + endif () endif () ## Set the following to unique id your computer ## if (APPLE) - set (CTEST_SITE "MAC.XXXX") + if(NOT DEFINED CTEST_SITE) + set (CTEST_SITE "MAC-${CTEST_SITE_EXT}") + endif() else () - set (CTEST_SITE "LINUX.XXXX") + if(NOT DEFINED CTEST_SITE) + set (CTEST_SITE "LINUX-${CTEST_SITE_EXT}") + endif() endif () if (APPLE) execute_process (COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE) diff --git a/config/commence.am b/config/commence.am index 05e4b653d89..11db41f133e 100644 --- a/config/commence.am +++ b/config/commence.am @@ -31,6 +31,7 @@ RUNEXEC=$(RUNSERIAL) # Libraries to link to while building LIBHDF5=$(top_builddir)/src/libhdf5.la LIBH5TEST=$(top_builddir)/test/libh5test.la +LIBH5TESTPAR=$(top_builddir)/testpar/libh5testpar.la LIBH5F=$(top_builddir)/fortran/src/libhdf5_fortran.la LIBH5FTEST=$(top_builddir)/fortran/test/libh5test_fortran.la LIBH5CPP=$(top_builddir)/c++/src/libhdf5_cpp.la diff --git a/configure.ac b/configure.ac index c51e19ddca3..c5bfe4276af 100644 --- a/configure.ac +++ b/configure.ac @@ -22,7 +22,7 @@ AC_PREREQ([2.71]) ## NOTE: Do not forget to change the version number here when we do a ## release!!! 
## -AC_INIT([HDF5], [1.15.0], [help@hdfgroup.org]) +AC_INIT([HDF5], [1.17.0], [help@hdfgroup.org]) AC_CONFIG_SRCDIR([src/H5.c]) AC_CONFIG_HEADERS([src/H5config.h]) @@ -854,6 +854,7 @@ if test "X$HDF_FORTRAN" = "Xyes"; then if test "$MY_FLT128_DIG" -gt "$MY_LDBL_DIG" ; then AC_MSG_RESULT([yes]) AC_DEFINE([HAVE_FLOAT128], [1], [Determine if __float128 will be used in the Fortran wrappers]) + AC_CHECK_SIZEOF([__float128]) else ## Can't use __float128, but write an undef line anyway AC_MSG_RESULT([no]) @@ -4233,10 +4234,10 @@ esac AC_SUBST([DEFAULT_API_VERSION]) AC_MSG_CHECKING([which version of public symbols to use by default]) AC_ARG_WITH([default-api-version], - [AS_HELP_STRING([--with-default-api-version=(default|v16|v18|v110|v112|v114|v116)], + [AS_HELP_STRING([--with-default-api-version=(default|v16|v18|v110|v112|v114|v116|v118)], [Specify default release version of public symbols - [default=v116]])],, - [withval=v116]) + [default=v118]])],, + [withval=v118]) ## Allowing "default" allows the GitHub CI to check that we didn't forget ## to change the defaults when creating a new major version @@ -4265,11 +4266,16 @@ elif test "X$withval" = "Xv114"; then DEFAULT_API_VERSION=v114 AC_DEFINE([USE_114_API_DEFAULT], [1], [Define using v1.14 public API symbols by default]) -elif test "X$withval" = "Xv116" -o "X$withval" = "Xdefault"; then +elif test "X$withval" = "Xv116"; then AC_MSG_RESULT([v116]) DEFAULT_API_VERSION=v116 AC_DEFINE([USE_116_API_DEFAULT], [1], [Define using v1.16 public API symbols by default]) +elif test "X$withval" = "Xv118" -o "X$withval" = "Xdefault"; then + AC_MSG_RESULT([v118]) + DEFAULT_API_VERSION=v118 + AC_DEFINE([USE_118_API_DEFAULT], [1], + [Define using v1.18 public API symbols by default]) else AC_MSG_ERROR([invalid version of public symbols given]) fi @@ -4279,7 +4285,7 @@ fi ## if the user insists on doing this via the --enable-unsupported configure ## flag, we'll let them. if test "X${ALLOW_UNSUPPORTED}" != "Xyes"; then - if test "X${DEFAULT_API_VERSION}" != "Xv116" -a "X${DEPRECATED_SYMBOLS}" = "Xno" ; then + if test "X${DEFAULT_API_VERSION}" != "Xv118" -a "X${DEPRECATED_SYMBOLS}" = "Xno" ; then AC_MSG_ERROR([Removing old public API symbols not allowed when using them as default public API symbols. Use --enable-unsupported to override this error.]) fi fi diff --git a/doc/threadsafety-warning.md b/doc/threadsafety-warning.md new file mode 100644 index 00000000000..49abcb00af9 --- /dev/null +++ b/doc/threadsafety-warning.md @@ -0,0 +1,16 @@ +## A Warning + +Any application that creates threads that use the HDF5 library must join those threads before either process exit or library close through H5close(). If all HDF5-using threads aren't joined, the threads may exhibit undefined behavior. + +## Discussion for Developers on Potential Improvements + +It would in principle be possible to make it safe to have threads continue using HDF5 resources after a call to H5close() by keeping a count of threads within the library. (There is probably no solution to an early process exit producing undefined behavior within threads.) This method would only be able to count (and presumably, only _need_ to count) threads that directly interact with the library. Because each thread would need to be counted exactly once, this would most likely be done by use of a thread-local key with e.g. a boolean value used to track whether a global atomic thread counter has already counted this thread.
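A minimal, standalone C11 sketch of this bookkeeping might look like the following; the names (`mark_thread_active`, `mark_thread_done`, `try_close`) are purely illustrative and are not part of the HDF5 API:

```c
/* Hedged sketch only -- not HDF5 source code. A thread-local flag ensures each
 * thread increments the global atomic counter at most once; a close routine
 * can then refuse to tear down shared state while other threads remain. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int         active_threads = 0;     /* threads that have touched the library */
static _Thread_local bool counted        = false; /* has this thread been counted yet? */

static void
mark_thread_active(void) /* would run on entry to every public API routine */
{
    if (!counted) { /* cheap after a thread's first call */
        counted = true;
        atomic_fetch_add(&active_threads, 1);
    }
}

static void
mark_thread_done(void) /* would run when a thread is finished with the library */
{
    if (counted) {
        counted = false;
        atomic_fetch_sub(&active_threads, 1);
    }
}

static int
try_close(void) /* returns 0 on success, -1 if the close is refused */
{
    mark_thread_active(); /* the closing thread counts itself */
    if (atomic_load(&active_threads) > 1)
        return -1; /* other registered threads still exist */
    /* ... real teardown of library resources would happen here ... */
    mark_thread_done();
    return 0;
}

int
main(void)
{
    mark_thread_active();
    printf("close %s\n", try_close() == 0 ? "succeeded" : "refused");
    return 0;
}
```

The per-call cost of such a scheme is a single branch on a thread-local flag after a thread's first API call, which is exactly the overhead weighed in issue 1 below.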
Then, if H5close() is invoked while this thread counter is above one (because one thread must be doing the closing), the library would not close; instead, it would keep its resources valid, in the hope of avoiding bad behavior in the remaining threads. + +The issues with this approach are as follows: + +1. Checking for the existence/value of the thread-local key is slow, or at least slow enough that it's probably not worth adding such a check to almost every API call just to handle this particular edge case. +2. Even with this approach, bad behavior would still be possible if the application does something like expose HDF5 resources to threads indirectly via a global variable. +3. How to allow H5close() to fail is not obvious. H5close() could be allowed to return an error indicating a failure to close, but the number of applications which could usefully respond to such an error by joining threads is small. If an application were able/willing to join its created threads, presumably it would have done so before calling H5close(). Alternatively, H5close() could succeed but silently leave the library open. This creates the potential for confusing, unexpected behavior when the user thinks they are closing and re-opening the library, e.g. if environment variables are modified between close and re-open, or if resources such as default property lists are modified. +4. Applications should join threads before closing libraries that those threads are using, so all of this work would constitute an above-and-beyond effort to maintain safe and defined behavior in the face of an unsafe application. + +Despite these issues, if a more performant method of thread counting like this were found, it might still be a worthwhile change. \ No newline at end of file diff --git a/doxygen/dox/CollectiveCalls.dox b/doxygen/dox/CollectiveCalls.dox index 9f26896262b..9fb7229a3ca 100644 --- a/doxygen/dox/CollectiveCalls.dox +++ b/doxygen/dox/CollectiveCalls.dox @@ -20,7 +20,7 @@ * \section sec_collective_calls_func Always collective * The following functions must always be called collectively. - +
API @@ -647,7 +647,7 @@ * If, however, the target object will not be modified, * they may be called independently. * - +
API @@ -943,7 +943,7 @@ * for an object or link in all cases where the object or link is accessed * in a parallel program. - +
* * + * + * + * + * + * + * + * + * * *
@@ -364,7 +364,7 @@ FORTRAN @@ -927,7 +927,7 @@ FORTRAN @@ -937,7 +937,7 @@ FORTRAN @@ -947,7 +947,7 @@ FORTRAN @@ -957,7 +957,7 @@ FORTRAN @@ -967,7 +967,7 @@ FORTRAN @@ -977,7 +977,7 @@ FORTRAN @@ -996,7 +996,7 @@ FORTRAN diff --git a/doxygen/dox/LearnHDFView.dox b/doxygen/dox/LearnHDFView.dox index 2a1ed610ef2..3b9afdb1ef1 100644 --- a/doxygen/dox/LearnHDFView.dox +++ b/doxygen/dox/LearnHDFView.dox @@ -7,7 +7,7 @@ This tutorial enables you to get a feel for HDF5 by using the HDFView browser. I any programming experience. \section sec_learn_hv_install HDFView Installation -\li Download and install HDFView. It can be downloaded from the Download HDFView page. +\li Download and install HDFView. It can be downloaded from the Download HDFView page. \li Obtain the storm1.txt text file, used in the tutorial. \section sec_learn_hv_begin Begin Tutorial diff --git a/doxygen/dox/Overview.dox b/doxygen/dox/Overview.dox index 1f68f7c50a3..b472d4d2a8d 100644 --- a/doxygen/dox/Overview.dox +++ b/doxygen/dox/Overview.dox @@ -21,7 +21,7 @@ documents cover a mix of tasks, concepts, and reference, to help a specific audience succeed. \par Offline reading - You can download it as an archive for offline reading. + You can download it as an archive for offline reading. \par ToDo List There is plenty of unfinished business. diff --git a/doxygen/dox/ReferenceManual.dox b/doxygen/dox/ReferenceManual.dox index ac1a4f22904..d7b5bf6f613 100644 --- a/doxygen/dox/ReferenceManual.dox +++ b/doxygen/dox/ReferenceManual.dox @@ -31,7 +31,7 @@ The functions provided by the HDF5 API are grouped into the following - @@ -103,7 +103,7 @@ The functions provided by the HDF5 API are grouped into the following - diff --git a/doxygen/dox/VOLConnGuide.dox b/doxygen/dox/VOLConnGuide.dox index c693980be1a..9781261e58c 100644 --- a/doxygen/dox/VOLConnGuide.dox +++ b/doxygen/dox/VOLConnGuide.dox @@ -4110,62 +4110,6 @@ Retrieves a pointer to the VOL object from an HDF5 file or object identifier. Returns a copy of the dtype_id parameter but with the location set to be in the file. Returns a negative value (#H5I_INVALID_HID) on errors. -\subsubsection subsubsecVOLNewConnpeek_name H5VLpeek_connector_id_by_name -
diff --git a/doxygen/dox/ExamplesAPI.dox b/doxygen/dox/ExamplesAPI.dox index dbd24f4d888..e2ee26ee28e 100644 --- a/doxygen/dox/ExamplesAPI.dox +++ b/doxygen/dox/ExamplesAPI.dox @@ -223,7 +223,7 @@ FORTRAN Read / Write Unlimited Dimension Dataset C -FORTRAN +FORTRAN Java JavaObj MATLAB PyHigh PyLow @@ -313,7 +313,7 @@ FORTRAN Create Intermediate Groups C -FORTRAN +FORTRAN Java JavaObj MATLAB PyHigh PyLow @@ -326,7 +326,7 @@ FORTRAN Iterate over Groups w/ H5Literate C -FORTRAN +FORTRAN Java JavaObj MATLAB PyHigh PyLow @@ -352,7 +352,7 @@ FORTRAN Recursively Traverse a File with H5Literate C - FORTRAN +FORTRAN Java JavaObj MATLAB PyHigh PyLow Recursively Traverse a File with H5Ovisit / H5Lvisit C - FORTRAN +FORTRAN Java JavaObj MATLAB PyHigh PyLow @@ -917,7 +917,7 @@ FORTRAN Creating and Accessing a File C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5Creating and Accessing a Dataset C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5Writing and Reading Contiguous Hyperslabs C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5Writing and Reading Regularly Spaced Data Hyperslabs C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5Writing and Reading Pattern Hyperslabs C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5Writing and Reading Chunk Hyperslabs C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5Using the Subfiling VFD to Write a File Striped Across Multiple Subfiles C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5Collectively Write Datasets with Filters and Not All Ranks have Data C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5
Event Set (H5ES)@ref H5ES "C""C++""Fortran""Java"HDF5 event set life cycle used with HDF5 VOL connectors that enable the asynchronous feature in HDF5. +Event Set (H5ES)@ref H5ES "C""C++"@ref FH5ES "Fortran""Java"HDF5 event set life cycle used with HDF5 VOL connectors that enable the asynchronous feature in HDF5.
HDF5 Optimizations APIs (H5DO)@ref H5DO "C""C++""Fortran""Java"Bypassing default HDF5 behavior in order to optimize for specific use cases. +HDF5 Optimizations APIs (H5DO)@ref H5DO "C""C++"@ref FH5DO "Fortran""Java"Bypassing default HDF5 behavior in order to optimize for specific use cases.
- - - - - - - - - - - - -
Signature:
-\code - hid_t H5VLpeek_connector_id_by_name(const char *name); -\endcode -
Arguments:
-\code - name (IN): name of the connector to query. -\endcode -
-Retrieves the ID for a registered VOL connector based on a connector name. This is done without duplicating -the ID and transferring ownership to the caller (as it normally the case in the HDF5 library). The ID returned -from this operation should not be closed. This is intended for use by VOL connectors to find their own ID. -Returns a negative value (#H5I_INVALID_HID) on errors. - -\subsubsection subsubsecVOLNewConnpeek_value H5VLpeek_connector_id_by_value - - - - - - - - - - - - - -
Signature:
-\code - hid_t H5VLpeek_connector_id_by_value(H5VL_class_value_t value); -\endcode -
Arguments:
-\code - value (IN): value of the connector to query. -\endcode -
-Retrieves the ID for a registered VOL connector based on a connector value. This is done without duplicating -the ID and transferring ownership to the caller (as it normally the case in the HDF5 library). The ID returned -from this operation should not be closed. This is intended for use by VOL connectors to find their own ID. -Returns a negative value (#H5I_INVALID_HID) on errors. - \subsection subsecVOLNewPass H5VLconnector_passthru.h This functionality is intended for VOL connector authors who are writing pass-through connectors and includes helper functions that are useful for writing such connectors. Callback equivalent functions can be diff --git a/fortran/src/H5VLff.F90 b/fortran/src/H5VLff.F90 index 66d098bfab6..b07e1389f21 100644 --- a/fortran/src/H5VLff.F90 +++ b/fortran/src/H5VLff.F90 @@ -401,6 +401,41 @@ END FUNCTION H5VLunregister_connector END SUBROUTINE H5VLunregister_connector_f +!> +!! \ingroup FH5VL +!! +!! \brief Determines whether two connector identifiers refer to the same connector. +!! +!! \param conn_id1 A valid identifier of the first connector to check +!! \param conn_id2 A valid identifier of the second connector to check +!! \param are_same Whether connector IDs refer to the same connector +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5VLcmp_connector_cls() +!! + SUBROUTINE H5VLcmp_connector_cls_f(are_same, conn_id1, conn_id2, hdferr) + IMPLICIT NONE + LOGICAL, INTENT(OUT) :: are_same + INTEGER(HID_T), INTENT(IN) :: conn_id1 + INTEGER(HID_T), INTENT(IN) :: conn_id2 + INTEGER, INTENT(OUT) :: hdferr + INTEGER :: are_same_c + + INTERFACE + INTEGER(C_INT) FUNCTION H5VLcmp_connector_cls(cmp_value, conn_id1, conn_id2) BIND(C, NAME='H5VLcmp_connector_cls') + IMPORT :: HID_T, C_INT + INTEGER(C_INT), INTENT(OUT) :: cmp_value + INTEGER(HID_T), VALUE :: conn_id1 + INTEGER(HID_T), VALUE :: conn_id2 + END FUNCTION H5VLcmp_connector_cls + END INTERFACE + + are_same = .FALSE. + hdferr = INT(H5VLcmp_connector_cls(are_same_c, conn_id1, conn_id2)) + IF(are_same_c .EQ. 0) are_same = .TRUE. + + END SUBROUTINE H5VLcmp_connector_cls_f + !> !! \ingroup FH5VL !! diff --git a/fortran/src/H5_f.c b/fortran/src/H5_f.c index 3b278bbbc54..ac3c7bfceed 100644 --- a/fortran/src/H5_f.c +++ b/fortran/src/H5_f.c @@ -540,6 +540,7 @@ h5init_flags_c(int_f *h5d_flags, size_t_f *h5d_size_flags, int_f *h5e_flags, hid h5f_flags[28] = (int_f)H5F_LIBVER_V112; h5f_flags[29] = (int_f)H5F_LIBVER_V114; h5f_flags[30] = (int_f)H5F_LIBVER_V116; + h5f_flags[31] = (int_f)H5F_LIBVER_V118; /* * H5FD flags @@ -568,17 +569,17 @@ h5init_flags_c(int_f *h5d_flags, size_t_f *h5d_size_flags, int_f *h5e_flags, hid h5fd_flags[20] = (int_f)SELECT_IOC_TOTAL; h5fd_flags[21] = (int_f)ioc_selection_options; #else - h5fd_flags[11] = 0; - h5fd_flags[12] = 0; - h5fd_flags[13] = 0; - h5fd_flags[14] = 0; - h5fd_flags[15] = 0; - h5fd_flags[16] = 0; - h5fd_flags[17] = 0; - h5fd_flags[18] = 0; - h5fd_flags[19] = 0; - h5fd_flags[20] = 0; - h5fd_flags[21] = 0; + h5fd_flags[11] = 0; + h5fd_flags[12] = 0; + h5fd_flags[13] = 0; + h5fd_flags[14] = 0; + h5fd_flags[15] = 0; + h5fd_flags[16] = 0; + h5fd_flags[17] = 0; + h5fd_flags[18] = 0; + h5fd_flags[19] = 0; + h5fd_flags[20] = 0; + h5fd_flags[21] = 0; #endif /* diff --git a/fortran/src/H5_ff.F90 b/fortran/src/H5_ff.F90 index f4bb211e0d1..d8c18374045 100644 --- a/fortran/src/H5_ff.F90 +++ b/fortran/src/H5_ff.F90 @@ -56,7 +56,7 @@ MODULE H5LIB ! ! H5F flags declaration ! 
- INTEGER, PARAMETER :: H5F_FLAGS_LEN = 31 + INTEGER, PARAMETER :: H5F_FLAGS_LEN = 32 INTEGER, DIMENSION(1:H5F_FLAGS_LEN) :: H5F_flags ! ! H5generic flags declaration @@ -380,6 +380,7 @@ END FUNCTION h5init1_flags_c H5F_LIBVER_V112_F = H5F_flags(29) H5F_LIBVER_V114_F = H5F_flags(30) H5F_LIBVER_V116_F = H5F_flags(31) + H5F_LIBVER_V118_F = H5F_flags(32) ! ! H5generic flags ! diff --git a/fortran/src/H5f90global.F90 b/fortran/src/H5f90global.F90 index 6d8697da699..ee2c6ab3aec 100644 --- a/fortran/src/H5f90global.F90 +++ b/fortran/src/H5f90global.F90 @@ -237,6 +237,7 @@ MODULE H5GLOBAL !DEC$ATTRIBUTES DLLEXPORT :: H5F_LIBVER_V112_F !DEC$ATTRIBUTES DLLEXPORT :: H5F_LIBVER_V114_F !DEC$ATTRIBUTES DLLEXPORT :: H5F_LIBVER_V116_F + !DEC$ATTRIBUTES DLLEXPORT :: H5F_LIBVER_V118_F !DEC$ATTRIBUTES DLLEXPORT :: H5F_FSPACE_STRATEGY_FSM_AGGR_F !DEC$ATTRIBUTES DLLEXPORT :: H5F_FSPACE_STRATEGY_PAGE_F !DEC$ATTRIBUTES DLLEXPORT :: H5F_FSPACE_STRATEGY_AGGR_F @@ -271,6 +272,7 @@ MODULE H5GLOBAL INTEGER :: H5F_LIBVER_V112_F !< H5F_LIBVER_V112 INTEGER :: H5F_LIBVER_V114_F !< H5F_LIBVER_V114 INTEGER :: H5F_LIBVER_V116_F !< H5F_LIBVER_V116 + INTEGER :: H5F_LIBVER_V118_F !< H5F_LIBVER_V118 INTEGER :: H5F_FSPACE_STRATEGY_FSM_AGGR_F !< H5F_FSPACE_STRATEGY_FSM_AGGR INTEGER :: H5F_FSPACE_STRATEGY_PAGE_F !< H5F_FSPACE_STRATEGY_PAGE INTEGER :: H5F_FSPACE_STRATEGY_AGGR_F !< H5F_FSPACE_STRATEGY_AGGR diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in index ae5465f73a2..5bc4a274c42 100644 --- a/fortran/src/hdf5_fortrandll.def.in +++ b/fortran/src/hdf5_fortrandll.def.in @@ -593,6 +593,7 @@ H5VL_mp_H5VLGET_CONNECTOR_ID_BY_NAME_F H5VL_mp_H5VLGET_CONNECTOR_ID_BY_VALUE_F H5VL_mp_H5VLGET_CONNECTOR_NAME_F H5VL_mp_H5VLCLOSE_F +H5VL_mp_H5VLCMP_CONNECTOR_CLS_F H5VL_mp_H5VLUNREGISTER_CONNECTOR_F H5VL_mp_H5VLNATIVE_ADDR_TO_TOKEN_F H5VL_mp_H5VLNATIVE_TOKEN_TO_ADDR_F diff --git a/fortran/test/tH5P_F03.F90 b/fortran/test/tH5P_F03.F90 index 95b634e6782..f411338ccc8 100644 --- a/fortran/test/tH5P_F03.F90 +++ b/fortran/test/tH5P_F03.F90 @@ -235,6 +235,13 @@ SUBROUTINE test_create(total_error) CALL VERIFY("***ERROR: Returned wrong low libver_bounds", low, H5F_LIBVER_V116_F, total_error) CALL VERIFY("***ERROR: Returned wrong high libver_bounds", high, H5F_LIBVER_V116_F, total_error) + CALL h5pset_libver_bounds_f(fapl, H5F_LIBVER_V118_F, H5F_LIBVER_V118_F, error) + CALL check("h5pset_libver_bounds_f",error, total_error) + CALL h5pget_libver_bounds_f(fapl, low, high, error) + CALL check("h5pget_libver_bounds_f",error, total_error) + CALL VERIFY("***ERROR: Returned wrong low libver_bounds", low, H5F_LIBVER_V118_F, total_error) + CALL VERIFY("***ERROR: Returned wrong high libver_bounds", high, H5F_LIBVER_V118_F, total_error) + CALL H5Pset_libver_bounds_f(fapl, H5F_LIBVER_LATEST_F, H5F_LIBVER_LATEST_F, error) CALL check("H5Pset_libver_bounds_f",error, total_error) CALL h5pget_libver_bounds_f(fapl, low, high, error) diff --git a/fortran/test/vol_connector.F90 b/fortran/test/vol_connector.F90 index 7394a31d6af..44446ddf361 100644 --- a/fortran/test/vol_connector.F90 +++ b/fortran/test/vol_connector.F90 @@ -160,6 +160,7 @@ SUBROUTINE test_registration_by_fapl(total_error) INTEGER(hid_t) :: file_id INTEGER(hid_t) :: fapl_id TYPE(C_PTR) :: f_ptr + LOGICAL :: are_same = .FALSE. 
CALL H5VLis_connector_registered_by_name_f( "FAKE_VOL_CONNECTOR_NAME", is_registered, error) @@ -189,7 +190,9 @@ SUBROUTINE test_registration_by_fapl(total_error) CALL H5Pget_vol_id_f(fapl_id, vol_id_out, error) CALL check("H5Pget_vol_id_f",error,total_error) - CALL VERIFY("H5Pget_vol_id_f", vol_id_out, vol_id, total_error) + CALL H5VLcmp_connector_cls_f(are_same, vol_id, vol_id_out, error) + CALL check("H5VLcmp_connector_cls_f",error,total_error) + CALL VERIFY("H5VLcmp_connector_cls_f", are_same, .TRUE., total_error) f_ptr = C_NULL_PTR CALL H5Pset_vol_f(fapl_id, vol_id, error, f_ptr) @@ -197,14 +200,20 @@ SUBROUTINE test_registration_by_fapl(total_error) CALL H5Pget_vol_id_f(fapl_id, vol_id_out, error) CALL check("H5Pget_vol_id_f",error,total_error) - CALL VERIFY("H5Pget_vol_id_f", vol_id_out, vol_id, total_error) + are_same = .FALSE. + CALL H5VLcmp_connector_cls_f(are_same, vol_id, vol_id_out, error) + CALL check("H5VLcmp_connector_cls_f",error,total_error) + CALL VERIFY("H5VLcmp_connector_cls_f", are_same, .TRUE., total_error) ENDIF CALL H5VLget_connector_id_by_name_f(NATIVE_VOL_CONNECTOR_NAME, vol_id_out, error) CALL check("H5VLget_connector_id_by_name_f",error,total_error) - CALL VERIFY("H5VLget_connector_id_by_name_f", vol_id_out, vol_id, total_error) - CALL H5Fcreate_f("voltest.h5",H5F_ACC_TRUNC_F, file_id, error, H5P_DEFAULT_F, fapl_id) + are_same = .FALSE. + CALL H5VLcmp_connector_cls_f(are_same, vol_id, vol_id_out, error) + CALL check("H5VLcmp_connector_cls_f",error,total_error) + CALL VERIFY("H5VLcmp_connector_cls_f", are_same, .TRUE., total_error) + CALL H5Fcreate_f("voltest.h5",H5F_ACC_TRUNC_F, file_id, error, H5P_DEFAULT_F, fapl_id) CALL check("H5F_create_f",error,total_error) CALL H5VLclose_f(vol_id_out, error) diff --git a/hl/examples/ex_table_03.c b/hl/examples/ex_table_03.c index 694eeb50277..280c6a4f571 100644 --- a/hl/examples/ex_table_03.c +++ b/hl/examples/ex_table_03.c @@ -47,7 +47,7 @@ main(void) Particle p = {"zero", 0, 0, 0.0F, 0.0}; size_t dst_sizes[NFIELDS] = {sizeof(p.name), sizeof(p.lati), sizeof(p.longi), sizeof(p.pressure), - sizeof(p.temperature)}; + sizeof(p.temperature)}; /* Define field information */ const char *field_names[NFIELDS] = {"Name", "Latitude", "Longitude", "Pressure", "Temperature"}; diff --git a/hl/examples/ex_table_04.c b/hl/examples/ex_table_04.c index 4960173a3be..ba7b4363557 100644 --- a/hl/examples/ex_table_04.c +++ b/hl/examples/ex_table_04.c @@ -56,7 +56,7 @@ main(void) size_t dst_offset[NFIELDS] = {HOFFSET(Particle, name), HOFFSET(Particle, lati), HOFFSET(Particle, longi), HOFFSET(Particle, pressure), HOFFSET(Particle, temperature)}; size_t dst_sizes[NFIELDS] = {sizeof(dst_buf[0].name), sizeof(dst_buf[0].lati), sizeof(dst_buf[0].longi), - sizeof(dst_buf[0].pressure), sizeof(dst_buf[0].temperature)}; + sizeof(dst_buf[0].pressure), sizeof(dst_buf[0].temperature)}; size_t field_offset_pos[2] = {HOFFSET(Position, lati), HOFFSET(Position, longi)}; const char *field_names[NFIELDS] = /* Define field information */ {"Name", "Latitude", "Longitude", "Pressure", "Temperature"}; diff --git a/hl/examples/ex_table_05.c b/hl/examples/ex_table_05.c index 9a689e32621..d7cecaf6959 100644 --- a/hl/examples/ex_table_05.c +++ b/hl/examples/ex_table_05.c @@ -50,7 +50,7 @@ main(void) size_t dst_offset[NFIELDS] = {HOFFSET(Particle, name), HOFFSET(Particle, lati), HOFFSET(Particle, longi), HOFFSET(Particle, pressure), HOFFSET(Particle, temperature)}; size_t dst_sizes[NFIELDS] = {sizeof(dst_buf[0].name), sizeof(dst_buf[0].lati), sizeof(dst_buf[0].longi), - 
sizeof(dst_buf[0].pressure), sizeof(dst_buf[0].temperature)}; + sizeof(dst_buf[0].pressure), sizeof(dst_buf[0].temperature)}; size_t field_offset_pos[2] = {HOFFSET(Position, lati), HOFFSET(Position, longi)}; diff --git a/hl/examples/ex_table_08.c b/hl/examples/ex_table_08.c index 3d67b16ed3e..d66a1b9e4d1 100644 --- a/hl/examples/ex_table_08.c +++ b/hl/examples/ex_table_08.c @@ -50,7 +50,7 @@ main(void) size_t dst_offset[NFIELDS] = {HOFFSET(Particle, name), HOFFSET(Particle, lati), HOFFSET(Particle, longi), HOFFSET(Particle, pressure), HOFFSET(Particle, temperature)}; size_t dst_sizes[NFIELDS] = {sizeof(p_data[0].name), sizeof(p_data[0].lati), sizeof(p_data[0].longi), - sizeof(p_data[0].pressure), sizeof(p_data[0].temperature)}; + sizeof(p_data[0].pressure), sizeof(p_data[0].temperature)}; /* Define an array of Particles to insert */ Particle p_data_insert[NRECORDS_INS] = {{"new", 30, 30, 3.0F, 30.0}, {"new", 40, 40, 4.0F, 40.0}}; diff --git a/hl/examples/ex_table_09.c b/hl/examples/ex_table_09.c index 37042951f34..ba515ee188a 100644 --- a/hl/examples/ex_table_09.c +++ b/hl/examples/ex_table_09.c @@ -45,7 +45,7 @@ main(void) size_t dst_offset[NFIELDS] = {HOFFSET(Particle, name), HOFFSET(Particle, lati), HOFFSET(Particle, longi), HOFFSET(Particle, pressure), HOFFSET(Particle, temperature)}; size_t dst_sizes[NFIELDS] = {sizeof(dst_buf[0].name), sizeof(dst_buf[0].lati), sizeof(dst_buf[0].longi), - sizeof(dst_buf[0].pressure), sizeof(dst_buf[0].temperature)}; + sizeof(dst_buf[0].pressure), sizeof(dst_buf[0].temperature)}; /* Define an array of Particles */ Particle p_data[NRECORDS] = {{"zero", 0, 0, 0.0F, 0.0}, {"one", 10, 10, 1.0F, 10.0}, diff --git a/hl/examples/ex_table_10.c b/hl/examples/ex_table_10.c index ad3f2e15589..d462f7f020f 100644 --- a/hl/examples/ex_table_10.c +++ b/hl/examples/ex_table_10.c @@ -50,7 +50,7 @@ main(void) size_t dst_offset[NFIELDS] = {HOFFSET(Particle, name), HOFFSET(Particle, lati), HOFFSET(Particle, longi), HOFFSET(Particle, pressure), HOFFSET(Particle, temperature)}; size_t dst_sizes[NFIELDS] = {sizeof(dst_buf[0].name), sizeof(dst_buf[0].lati), sizeof(dst_buf[0].longi), - sizeof(dst_buf[0].pressure), sizeof(dst_buf[0].temperature)}; + sizeof(dst_buf[0].pressure), sizeof(dst_buf[0].temperature)}; /* Define field information */ const char *field_names[NFIELDS] = {"Name", "Latitude", "Longitude", "Pressure", "Temperature"}; diff --git a/hl/src/H5LT.c b/hl/src/H5LT.c index 7d44792fbc0..7b55cedeb3b 100644 --- a/hl/src/H5LT.c +++ b/hl/src/H5LT.c @@ -1228,7 +1228,7 @@ find_dataset(H5_ATTR_UNUSED hid_t loc_id, const char *name, H5_ATTR_UNUSED const * cause the iterator to immediately return that positive value, * indicating short-circuit success */ - if (strncmp(name, (char *)op_data, strlen((char *)op_data)) == 0) + if (strcmp(name, (char *)op_data) == 0) ret = 1; return ret; diff --git a/hl/test/gen_test_ds.c b/hl/test/gen_test_ds.c index 438a35aab75..525f44a046b 100644 --- a/hl/test/gen_test_ds.c +++ b/hl/test/gen_test_ds.c @@ -136,9 +136,9 @@ create_long_dataset(hid_t fid, const char *name, const char *dsidx) int rankds = 1; hsize_t dims[4] = {DIM1_SIZE, DIM2_SIZE, DIM3_SIZE, DIM4_SIZE}; long buf[DIM_DATA * 3 * 2] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, - 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, - 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; + 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 
5, 6, + 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; hsize_t s1_dim[1] = {DIM1_SIZE}; hsize_t s2_dim[1] = {DIM2_SIZE}; hsize_t s3_dim[1] = {DIM3_SIZE}; diff --git a/hl/test/test_ds.c b/hl/test/test_ds.c index f85ed81b7fb..0172cb45a22 100644 --- a/hl/test/test_ds.c +++ b/hl/test/test_ds.c @@ -1341,10 +1341,11 @@ test_detachscales(void) static int test_char_attachscales(const char *fileext) { - hid_t fid = -1; - hid_t did = -1; - char dsname[32]; - char scalename[32]; + hid_t fid = -1; + hid_t did = -1; + char dsname[32]; + char scalename[32]; + herr_t ds_existed = 0; snprintf(dsname, sizeof(dsname), "%s%s", DATASET_NAME, "ac"); @@ -1357,6 +1358,14 @@ test_char_attachscales(const char *fileext) if (create_char_dataset(fid, "ac", 0) < 0) goto out; + /* test finding dataset dsname */ + if ((ds_existed = H5LTfind_dataset(fid, dsname)) < 0) + goto out; + if (ds_existed == 0) { + printf("Unexpected result: Dataset \"%s\" does exist\n", dsname); + goto out; + } + if ((did = H5Dopen2(fid, dsname, H5P_DEFAULT)) >= 0) { snprintf(scalename, sizeof(scalename), "%s%s", DS_1_NAME, "ac"); if (test_attach_scale(fid, did, scalename, DIM0) < 0) diff --git a/hl/test/test_lite.c b/hl/test/test_lite.c index 9bbad45d609..23508b79990 100644 --- a/hl/test/test_lite.c +++ b/hl/test/test_lite.c @@ -29,6 +29,9 @@ #define DSET6_NAME "dataset double" #define DSET7_NAME "dataset string" +/* Name of a non-existing dataset, do not create a dataset with this name */ +#define NODS_NAME "dataset" + #define DIM 6 #define ATTR_NAME_SUB "att" @@ -60,6 +63,7 @@ test_dsets(void) hsize_t dims[2] = {2, 3}; hid_t file_id; hid_t dataset_id; + herr_t ds_existed = 0; /* whether searched ds exists */ char data_char_in[DIM] = {1, 2, 3, 4, 5, 6}; char data_char_out[DIM]; short data_short_in[DIM] = {1, 2, 3, 4, 5, 6}; @@ -348,6 +352,23 @@ test_dsets(void) if (strcmp(data_string_in, data_string_out) != 0) goto out; + PASSED(); + + /*------------------------------------------------------------------------- + * H5LTfind_dataset test + *------------------------------------------------------------------------- + */ + + HL_TESTING2("H5LTfind_dataset"); + + /* Try to find a non-existing ds whose name matches existing datasets partially */ + if ((ds_existed = H5LTfind_dataset(file_id, NODS_NAME)) < 0) + goto out; + if (ds_existed > 0) { + printf("Dataset \"%s\" does not exist.\n", NODS_NAME); + goto out; + } + /*------------------------------------------------------------------------- * end tests *------------------------------------------------------------------------- @@ -1075,7 +1096,7 @@ test_integers(void) char *dt_str; size_t str_len; - HL_TESTING3("\n text for integer types"); + HL_TESTING3(" text for integer types"); if ((dtype = H5LTtext_to_dtype("H5T_NATIVE_INT\n", H5LT_DDL)) < 0) goto out; @@ -1881,6 +1902,7 @@ test_text_dtype(void) { HL_TESTING2("H5LTtext_to_dtype"); + printf("\n"); if (test_integers() < 0) goto out; diff --git a/hl/test/test_packet_vlen.c b/hl/test/test_packet_vlen.c index 99a5dbef91d..d17fcf1412f 100644 --- a/hl/test/test_packet_vlen.c +++ b/hl/test/test_packet_vlen.c @@ -861,9 +861,11 @@ verify_attribute(hid_t fid, const char *table_name, const char *attr_name) /* Verify values read in */ for (ii = 0; ii < ATTR_DIM; ii++) - if (attr_data[ii] != read_data[ii]) - TestErrPrintf("%d: attribute data different: attr_data[%d]=%d, read_data[%d]=%d\n", __LINE__, ii, - attr_data[ii], ii, read_data[ii]); + if (attr_data[ii] != read_data[ii]) { + fprintf(stderr, "%d: attribute data different: attr_data[%d]=%d, 
read_data[%d]=%d\n", __LINE__, + ii, attr_data[ii], ii, read_data[ii]); + goto error; + } /* Close the attribute */ if (H5Aclose(attr_id) < 0) diff --git a/hl/test/test_table.c b/hl/test/test_table.c index 8996fa46480..4aa359fedcd 100644 --- a/hl/test/test_table.c +++ b/hl/test/test_table.c @@ -236,13 +236,13 @@ test_table(hid_t fid, int do_write) 0.0, 0, }, - {"one", 10, 1.0F, 10.0, 10}, - {"two", 20, 2.0F, 20.0, 20}, - {"three", 30, 3.0F, 30.0, 30}, - {"four", 40, 4.0F, 40.0, 40}, - {"five", 50, 5.0F, 50.0, 50}, - {"six", 60, 6.0F, 60.0, 60}, - {"seven", 70, 7.0F, 70.0, 70}}; + {"one", 10, 1.0F, 10.0, 10}, + {"two", 20, 2.0F, 20.0, 20}, + {"three", 30, 3.0F, 30.0, 30}, + {"four", 40, 4.0F, 40.0, 40}, + {"five", 50, 5.0F, 50.0, 50}, + {"six", 60, 6.0F, 60.0, 60}, + {"seven", 70, 7.0F, 70.0, 70}}; /* buffers for the field "Pressure" and "New_field" */ float pressure_in[NRECORDS] = {0.0F, 1.0F, 2.0F, 3.0F, 4.0F, 5.0F, 6.0F, 7.0F}; float pressure_out[NRECORDS]; @@ -302,8 +302,8 @@ test_table(hid_t fid, int do_write) HOFFSET(particle2_t, pressure), HOFFSET(particle2_t, temperature), HOFFSET(particle2_t, lati), HOFFSET(particle2_t, new_field)}; size_t dst_sizes2[NFIELDS + 1] = {sizeof(rbuf2[0].name), sizeof(rbuf2[0].longi), - sizeof(rbuf2[0].pressure), sizeof(rbuf2[0].temperature), - sizeof(rbuf2[0].lati), sizeof(rbuf2[0].new_field)}; + sizeof(rbuf2[0].pressure), sizeof(rbuf2[0].temperature), + sizeof(rbuf2[0].lati), sizeof(rbuf2[0].new_field)}; /*------------------------------------------------------------------------- * initialize table parameters * size and the offsets of struct members in memory @@ -353,7 +353,7 @@ test_table(hid_t fid, int do_write) HOFFSET(particle_t, pressure), HOFFSET(particle_t, temperature), HOFFSET(particle_t, lati)}; size_t field_size[NFIELDS] = {sizeof(rbuf[0].name), sizeof(rbuf[0].longi), sizeof(rbuf[0].pressure), - sizeof(rbuf[0].temperature), sizeof(rbuf[0].lati)}; + sizeof(rbuf[0].temperature), sizeof(rbuf[0].lati)}; const char *field_names4[NFIELDS + 1] = {"F1", "F2", "F3", "F4", "F5", "F6"}; hid_t field_type4[NFIELDS + 1]; diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java index 388ba18f87a..54c51798cf7 100644 --- a/java/src/hdf/hdf5lib/H5.java +++ b/java/src/hdf/hdf5lib/H5.java @@ -231,7 +231,7 @@ * which prints out the HDF5 error stack, as described in the HDF5 C API @ref H5Eprint(). This * may be used by Java exception handlers to print out the HDF5 error stack.
* - * @version HDF5 1.15.0
+ * @version HDF5 1.17.0
* See also: * @ref HDFARRAY hdf.hdf5lib.HDFArray
* @ref HDF5CONST hdf.hdf5lib.HDF5Constants
@@ -273,7 +273,7 @@ public class H5 implements java.io.Serializable { * * Make sure to update the versions number when a different library is used. */ - public final static int LIB_VERSION[] = {1, 15, 0}; + public final static int LIB_VERSION[] = {1, 17, 0}; /** * @ingroup JH5 @@ -15497,6 +15497,24 @@ public synchronized static native String H5VLget_connector_name(long object_id) public synchronized static native void H5VLunregister_connector(long connector_id) throws HDF5LibraryException; + /** + * @ingroup JH5VL + * + * H5VLcmp_connector_cls Determines whether two connector identifiers refer to the same connector. + * + * @param conn_id1 + * IN: Identifier of connector to compare. + * @param conn_id2 + * IN: Identifier of connector to compare. + * + * @return true if the connector identifiers refer to the same connector, else false. + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + **/ + public synchronized static native boolean H5VLcmp_connector_cls(long conn_id1, long conn_id2) + throws HDF5LibraryException; + // /////// unimplemented //////// // hid_t H5VLregister_connector(const H5VL_class_t *cls, hid_t vipl_id); diff --git a/java/src/hdf/hdf5lib/HDF5Constants.java b/java/src/hdf/hdf5lib/HDF5Constants.java index 55b6f4be913..0260ce879e9 100644 --- a/java/src/hdf/hdf5lib/HDF5Constants.java +++ b/java/src/hdf/hdf5lib/HDF5Constants.java @@ -575,6 +575,8 @@ public class HDF5Constants { /** */ public static final int H5F_LIBVER_V116 = H5F_LIBVER_V116(); /** */ + public static final int H5F_LIBVER_V118 = H5F_LIBVER_V118(); + /** */ public static final int H5F_LIBVER_NBOUNDS = H5F_LIBVER_NBOUNDS(); /** */ public static final int H5F_LIBVER_LATEST = H5F_LIBVER_LATEST(); @@ -2060,6 +2062,8 @@ public class HDF5Constants { private static native final int H5F_LIBVER_V116(); + private static native final int H5F_LIBVER_V118(); + private static native final int H5F_LIBVER_NBOUNDS(); private static native final int H5F_LIBVER_LATEST(); diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c index aeec71fb9f4..41cd6344033 100644 --- a/java/src/jni/h5Constants.c +++ b/java/src/jni/h5Constants.c @@ -1309,6 +1309,11 @@ Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1V116(JNIEnv *env, jclass cls) return H5F_LIBVER_V116; } JNIEXPORT jint JNICALL +Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1V118(JNIEnv *env, jclass cls) +{ + return H5F_LIBVER_V118; +} +JNIEXPORT jint JNICALL Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1NBOUNDS(JNIEnv *env, jclass cls) { return H5F_LIBVER_NBOUNDS; diff --git a/java/src/jni/h5vlImp.c b/java/src/jni/h5vlImp.c index 47e532a5609..b2fc6a1aebf 100644 --- a/java/src/jni/h5vlImp.c +++ b/java/src/jni/h5vlImp.c @@ -19,6 +19,7 @@ extern "C" { #include "hdf5.h" #include "h5jni.h" #include "h5vlImp.h" +#include "H5VLconnector_passthru.h" /* * Class: hdf_hdf5lib_H5 @@ -272,6 +273,29 @@ Java_hdf_hdf5lib_H5_H5VLunregister_1connector(JNIEnv *env, jclass clss, jlong co return; } /* end Java_hdf_hdf5lib_H5_H5VLunregister_1connector */ +/* + * Class: hdf_hdf5lib_H5 + * Method: H5VLcmp_connector_cls + * Signature: (JJ)Z + */ +JNIEXPORT jboolean JNICALL +Java_hdf_hdf5lib_H5_H5VLcmp_1connector_1cls(JNIEnv *env, jclass clss, jlong conn_id1, jlong conn_id2) +{ + int cmp_value = 0; + jboolean bval = JNI_FALSE; + herr_t retValue = FAIL; + + UNUSED(clss); + + if ((retValue = H5VLcmp_connector_cls(&cmp_value, (hid_t)conn_id1, (hid_t)conn_id2)) < 0) + H5_LIBRARY_ERROR(ENVONLY); + + bval = (cmp_value == 0) ? 
JNI_TRUE : JNI_FALSE; + +done: + return bval; +} /* end Java_hdf_hdf5lib_H5_H5VLcmp_connector_cls */ + #ifdef __cplusplus } /* end extern "C" */ #endif /* __cplusplus */ diff --git a/java/src/jni/h5vlImp.h b/java/src/jni/h5vlImp.h index a17807f9380..d3248f53d13 100644 --- a/java/src/jni/h5vlImp.h +++ b/java/src/jni/h5vlImp.h @@ -93,6 +93,13 @@ JNIEXPORT void JNICALL Java_hdf_hdf5lib_H5_H5VLclose(JNIEnv *, jclass, jlong); */ JNIEXPORT void JNICALL Java_hdf_hdf5lib_H5_H5VLunregister_1connector(JNIEnv *, jclass, jlong); +/* + * Class: hdf_hdf5lib_H5 + * Method: H5VLcmp_connector_cls + * Signature: (JJ)Z + */ +JNIEXPORT jboolean JNICALL Java_hdf_hdf5lib_H5_H5VLcmp_1connector_1cls(JNIEnv *, jclass, jlong, jlong); + #ifdef __cplusplus } /* end extern "C" */ #endif /* __cplusplus */ diff --git a/java/test/TestAll.java b/java/test/TestAll.java index c22fbfe2ca8..5f3b14c3e01 100644 --- a/java/test/TestAll.java +++ b/java/test/TestAll.java @@ -28,5 +28,4 @@ TestH5Obasic.class, TestH5Ocopy.class, TestH5Ocreate.class, TestH5PL.class, TestH5Z.class}) -public class TestAll { -} +public class TestAll {} diff --git a/java/test/TestH5.java b/java/test/TestH5.java index d3ce84426ca..bfe2cc59540 100644 --- a/java/test/TestH5.java +++ b/java/test/TestH5.java @@ -313,7 +313,7 @@ public void testH5set_free_list_limits() @Test public void testH5get_libversion() { - int libversion[] = {1, 15, 0}; + int libversion[] = {1, 17, 0}; try { H5.H5get_libversion(libversion); @@ -354,7 +354,7 @@ public void testH5get_libversion_null_param() @Test public void testH5check_version() { - int majnum = 1, minnum = 15, relnum = 0; + int majnum = 1, minnum = 17, relnum = 0; try { H5.H5check_version(majnum, minnum, relnum); diff --git a/java/test/TestH5D.java b/java/test/TestH5D.java index eacaabf82eb..db2a0428db7 100644 --- a/java/test/TestH5D.java +++ b/java/test/TestH5D.java @@ -939,8 +939,8 @@ public int callback(byte[] elem_buf, long elem_id, int ndim, long[] point, H5D_i public void testH5Dvlen_get_buf_size() { String[] str_data = {"Parting", "is such", "sweet", "sorrow.", "Testing", "one", "two", "three.", - "Dog,", "man's", "best", "friend.", "Diamonds", "are", "a", "girls!", - "S A", "T U R", "D A Y", "night", "That's", "all", "folks", "!!!"}; + "Dog,", "man's", "best", "friend.", "Diamonds", "are", "a", "girls!", + "S A", "T U R", "D A Y", "night", "That's", "all", "folks", "!!!"}; long vl_size = -1; /* Number of bytes used */ long str_data_bytes = 0; for (int idx = 0; idx < str_data.length; idx++) diff --git a/java/test/TestH5F.java b/java/test/TestH5F.java index fde27398a73..5ae8f7e0c0e 100644 --- a/java/test/TestH5F.java +++ b/java/test/TestH5F.java @@ -45,8 +45,8 @@ public class TestH5F { private static final int[] OBJ_COUNTS = {COUNT_OBJ_FILE, COUNT_OBJ_DATASET, COUNT_OBJ_GROUP, COUNT_OBJ_DATATYPE, COUNT_OBJ_ATTR, COUNT_OBJ_ALL}; private static final int[] OBJ_TYPES = {HDF5Constants.H5F_OBJ_FILE, HDF5Constants.H5F_OBJ_DATASET, - HDF5Constants.H5F_OBJ_GROUP, HDF5Constants.H5F_OBJ_DATATYPE, - HDF5Constants.H5F_OBJ_ATTR, HDF5Constants.H5F_OBJ_ALL}; + HDF5Constants.H5F_OBJ_GROUP, HDF5Constants.H5F_OBJ_DATATYPE, + HDF5Constants.H5F_OBJ_ATTR, HDF5Constants.H5F_OBJ_ALL}; long H5fid = HDF5Constants.H5I_INVALID_HID; private final void _deleteFile(String filename) diff --git a/java/test/TestH5G.java b/java/test/TestH5G.java index 1f6da3120ae..65fadde95c3 100644 --- a/java/test/TestH5G.java +++ b/java/test/TestH5G.java @@ -37,7 +37,7 @@ public class TestH5G { private static final String H5_FILE = "testG.h5"; private static 
final String H5_FILE2 = "testG2.h5"; private static final String[] GROUPS = {"/G1", "/G1/G11", "/G1/G12", "/G1/G11/G111", - "/G1/G11/G112", "/G1/G11/G113", "/G1/G11/G114"}; + "/G1/G11/G112", "/G1/G11/G113", "/G1/G11/G114"}; private static final String[] GROUPS2 = {"/G1", "/G1/G14", "/G1/G12", "/G1/G13", "/G1/G11"}; long H5fid = HDF5Constants.H5I_INVALID_HID; long H5fid2 = HDF5Constants.H5I_INVALID_HID; diff --git a/java/test/TestH5Plist.java b/java/test/TestH5Plist.java index d8a429b6418..c2a9862eb8e 100644 --- a/java/test/TestH5Plist.java +++ b/java/test/TestH5Plist.java @@ -73,7 +73,7 @@ public class TestH5Plist { private static final String PROP3_NAME = "Property 3"; private static final char[] prop3_def = {'T', 'e', 'n', ' ', 'c', - 'h', 'a', 'r', 's', ' '}; // Property 3 default value + 'h', 'a', 'r', 's', ' '}; // Property 3 default value private static final int PROP3_SIZE = 10; private static final String PROP4_NAME = "Property 4"; diff --git a/java/test/TestH5VL.java b/java/test/TestH5VL.java index 99505ae8e51..666219114fe 100644 --- a/java/test/TestH5VL.java +++ b/java/test/TestH5VL.java @@ -98,7 +98,8 @@ public void testH5VLget_connector_id() */ String connector = System.getenv("HDF5_VOL_CONNECTOR"); if (connector == null) - assertEquals(HDF5Constants.H5VL_NATIVE, native_id); + assertTrue("H5.H5VLcmp_connector_cls(H5VL_NATIVE_NAME, native_id)", + H5.H5VLcmp_connector_cls(HDF5Constants.H5VL_NATIVE, native_id)); } catch (Throwable err) { err.printStackTrace(); @@ -122,7 +123,8 @@ public void testH5VLget_connector_id_by_name() try { long native_id = H5.H5VLget_connector_id_by_name(HDF5Constants.H5VL_NATIVE_NAME); assertTrue("H5.H5VLget_connector_id_by_name H5VL_NATIVE_NAME", native_id >= 0); - assertEquals(HDF5Constants.H5VL_NATIVE, native_id); + assertTrue("H5.H5VLcmp_connector_cls(H5VL_NATIVE_NAME, native_id)", + H5.H5VLcmp_connector_cls(HDF5Constants.H5VL_NATIVE, native_id)); } catch (Throwable err) { err.printStackTrace(); @@ -136,7 +138,8 @@ public void testH5VLget_connector_id_by_value() try { long native_id = H5.H5VLget_connector_id_by_value(HDF5Constants.H5VL_NATIVE_VALUE); assertTrue("H5.H5VLget_connector_id_by_value H5VL_NATIVE_VALUE", native_id >= 0); - assertEquals(HDF5Constants.H5VL_NATIVE, native_id); + assertTrue("H5.H5VLcmp_connector_cls(H5VL_NATIVE_NAME, native_id)", + H5.H5VLcmp_connector_cls(HDF5Constants.H5VL_NATIVE, native_id)); } catch (Throwable err) { err.printStackTrace(); diff --git a/release_docs/HISTORY-1_14_0-1_16_0.txt b/release_docs/HISTORY-1_14_0-1_16_0.txt new file mode 100644 index 00000000000..815dbfa8fe3 --- /dev/null +++ b/release_docs/HISTORY-1_14_0-1_16_0.txt @@ -0,0 +1,6387 @@ +HDF5 History +============ + +This file contains development history of the HDF5 1.14 branch + +06. Release Information for hdf5-1.14.5 +05. Release Information for hdf5-1.14.4 +04. Release Information for hdf5-1.14.3 +03. Release Information for hdf5-1.14.2 +02. Release Information for hdf5-1.14.1 +01. Release Information for hdf5-1.14.0 + +[Search on the string '%%%%' for section breaks of each release.] + +%%%%1.14.5%%%% + +HDF5 version 1.14.5 released on 2024-09-30 +================================================================================ + + +INTRODUCTION +============ + +This document describes the differences between this release and the previous +HDF5 release. It contains information on the platforms tested and known +problems in this release. For more details check the HISTORY*.txt files in the +HDF5 source. 
+ +Note that documentation in the links below will be updated at the time of each +final release. + +Links to HDF5 documentation can be found on: + + https://support.hdfgroup.org/releases/hdf5/latest-docs.html + +The official HDF5 releases can be obtained from: + + https://support.hdfgroup.org/downloads/index.html + +Changes from Release to Release and New Features in the HDF5-1.14.x release series +can be found at: + + https://support.hdfgroup.org/releases/hdf5/documentation/release_specific_info.md + +If you have any questions or comments, please send them to the HDF Help Desk: + + help@hdfgroup.org + + +CONTENTS +======== + +- New Features +- Support for new platforms and languages +- Bug Fixes since HDF5-1.14.4 +- Platforms Tested +- Known Problems +- CMake vs. Autotools installations + + +New Features +============ + + Configuration: + ------------- + - Added signed Windows msi binary and signed Apple dmg binary files. + + The release process now provides signed Windows and Mac installation + binaries in addition to the Debian and rpm installation binaries. The Mac + binaries are built as universal binaries on an ARM-based Mac. Installer + files are no longer compressed into packaged archives. + + - Moved examples to the HDF5Examples folder in the source tree. + + Moved the C++ and Fortran examples from the examples folder to the HDF5Examples + folder and renamed to TUTR, tutorial. This is referenced from the LearnBasics + doxygen page. + + - Added support for using zlib-ng package as the zlib library: + + CMake: HDF5_USE_ZLIB_NG + Autotools: --enable-zlibng + + Added the option HDF5_USE_ZLIB_NG to allow the replacement of the + default ZLib package by the zlib-ng package as a built-in compression library. + + - Disable CMake UNITY_BUILD for hdf5 + + CMake added a target property, UNITY_BUILD, that when set to true, the target + source files will be combined into batches for faster compilation. By default, + the setting is OFF, but could be enabled by a project that includes HDF5 as a subproject. + + HDF5 has disabled this feature by setting the property to OFF in the HDFMacros.cmake file. + + - Removed "function/code stack" debugging configuration option: + + CMake: HDF5_ENABLE_CODESTACK + Autotools: --enable-codestack + + This was used to debug memory leaks internal to the library, but has been + broken for >1.5 years and is now easily replaced with third-party tools + (e.g. libbacktrace: https://github.com/ianlancetaylor/libbacktrace) on an + as-needed basis when debugging an issue. + + - Added configure options for enabling/disabling non-standard programming + language features + + - Added the CMake variable HDF5_ENABLE_ROS3_VFD to the HDF5 CMake config + file hdf5-config.cmake. This allows it to easily detect if the library + has been built with or without read-only S3 functionality. + + + Library: + -------- + - Added new routines for interacting with error stacks: H5Epause_stack, + H5Eresume_stack, and H5Eis_paused. These routines can be used to + indicate that errors from a call to an HDF5 routine should not be + pushed on to an error stack. Primarily targeted toward third-party + developers of Virtual File Drivers (VFDs) and Virtual Object Layer (VOL) + connectors, these routines allow developers to perform "speculative" + operations (such as trying to open a file or object) without requiring + that the error stack be cleared after a speculative operation fails. 
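To make the "speculative operation" use case for the new error-stack routines concrete, a VOL connector or VFD author might wrap a possibly-failing open as in the sketch below. This is only an illustrative sketch; it assumes H5E_DEFAULT is accepted by the new routines to refer to the current default error stack.

```c
#include "hdf5.h"

/* Speculatively try to open a file without leaving error records on the
 * default error stack when the open legitimately fails. */
static hid_t
try_open_file(const char *name)
{
    hid_t fid;

    H5Epause_stack(H5E_DEFAULT);                      /* stop pushing error records */
    fid = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT); /* failure here may be expected */
    H5Eresume_stack(H5E_DEFAULT);                     /* restore normal error reporting */

    return fid; /* a negative value simply means the speculative open failed */
}
```

As described above, H5Eis_paused() can be used to query whether pushing is currently paused before toggling the state, which may matter in library code that can be entered with the stack already paused.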
+ + + Parallel Library: + ----------------- + - + + + Fortran Library: + ---------------- + + - Add Fortran H5R APIs: + h5rcreate_attr_f, h5rcreate_object_f, h5rcreate_region_f, + h5ropen_attr_f, h5ropen_object_f, h5ropen_region_f, + h5rget_file_name_f, h5rget_attr_name_f, h5rget_obj_name_f, + h5rcopy_f, h5requal_f, h5rdestroy_f, h5rget_type_f + + + C++ Library: + ------------ + - + + + Java Library: + ------------- + - + + + Tools: + ------ + - Added doxygen files for the tools + + Implement the tools usage text as pages in doxygen. + + - Added option to adjust the page buffer size in tools + + The page buffer cache size for a file can now be adjusted using the + --page-buffer-size=N + option in the h5repack, h5diff, h5dump, h5ls, and h5stat tools. This + will call the H5Pset_page_buffer_size() API function with the specified + size in bytes. + + - Allowed h5repack to reserve space for a user block without a file + + This is useful for users who want to reserve space in the file for + future use without requiring a file to copy. + + + High-Level APIs: + ---------------- + - + + + C Packet Table API: + ------------------- + - + + + Internal header file: + --------------------- + - + + + Documentation: + -------------- + - Documented that leaving HDF5 threads running at termination is unsafe + + Added doc/threadsafety-warning.md as a warning that threads which use HDF5 + resources must be closed before either process exit or library close. + If HDF5 threads are alive during either of these operations, their resources + will not be cleaned up properly and undefined behavior is possible. + + This document also includes a discussion on potential ways to mitigate this issue. + + + +Support for new platforms, languages and compilers +================================================== + - + + +Bug Fixes since HDF5-1.14.4 release +=================================== + Library + ------- + - Fixed a memory leak in H5F__accum_write() + + The memory was allocated in H5F__accum_write() and was to be freed in + H5F__accum_reset() during the closing process but a failure occurred just + before the deallocation, leaving the memory un-freed. The problem is + now fixed. + + Fixes GitHub #4585 + + - Fixed an incorrect returned value by H5LTfind_dataset() + + H5LTfind_dataset() returned true for non-existing datasets because it only + compared up to the length of the searched string, such as "Day" vs "DayNight". + Applied the user's patch to correct this behavior. + + Fixes GitHub #4780 + + - Fixed a segfault by H5Gmove2, extended to fix H5Lcopy and H5Lmove + + A user's application segfaulted when it passed in an invalid location ID + to H5Gmove2. The src and dst location IDs must be either a file or a group + ID. The fix was also applied to H5Lcopy and H5Lmove. Now, all these + three functions will fail if either the src or dst location ID is not a file + or a group ID. + + Fixes GitHub #4737 + + - Fixed a segfault by H5Lget_info() + + A user's program generated a segfault when the ID passed into H5Lget_info() + was a datatype ID. This was caused by non-VOL functions being used internally + where VOL functions should have been. This correction was extended to many + other functions to prevent potential issue in the future. + + Fixes GitHub #4730 + + - Fixed a segfault by H5Fget_intent(), extended to fix several other functions + + A user's program generated a segfault when the ID passed into H5Fget_intent() + was not a file ID. 
In addition to H5Fget_intent(), a number of APIs also failed + to detect an incorrect ID being passed in, which can potentially cause various + failures, including segfault. The affected functions are listed below and now + properly detect incorrect ID parameters: + + H5Fget_intent() + H5Fget_fileno() + H5Fget_freespace() + H5Fget_create_plist() + H5Fget_access_plist() + H5Fget_vfd_handle() + H5Dvlen_get_buf_size() + H5Fget_mdc_config() + H5Fset_mdc_config() + H5Freset_mdc_hit_rate_stats() + + Fixes GitHub #4656 and GitHub #4662 + + - Fixed a bug with large external datasets + + When performing a large I/O on an external dataset, the library would only + issue a single read or write system call. This could cause errors or cause + the data to be incorrect. These calls do not guarantee that they will + process the entire I/O request, and may need to be called multiple times + to complete the I/O, advancing the buffer and reducing the size by the + amount actually processed by read or write each time. Implemented this + algorithm for external datasets in both the read and write cases. + + Fixes GitHub #4216 + Fixes h5py GitHub #2394 + + - Fixed a bug in the Subfiling VFD that could cause a buffer over-read + and memory allocation failures + + When performing vector I/O with the Subfiling VFD, making use of the + vector I/O size extension functionality could cause the VFD to read + past the end of the "I/O sizes" array that is passed in. When an entry + in the "I/O sizes" array has the value 0 and that entry is at an array + index greater than 0, this signifies that the value in the preceding + array entry should be used for the rest of the I/O vectors, effectively + extending the last valid I/O size across the remaining entries. This + allows an application to save a bit on memory by passing in a smaller + "I/O sizes" array. The Subfiling VFD didn't implement a check for this + functionality in the portion of the code that generates I/O vectors, + causing it to read past the end of the "I/O sizes" array when it was + shorter than expected. This could also result in memory allocation + failures, as the nearby memory allocations are based off the values + read from that array, which could be uninitialized. + + - Fixed H5Rget_attr_name to return the length of the attribute's name + without the null terminator + + H5Rget_file_name and H5Rget_obj_name both return the name's length + without the null terminator. H5Rget_attr_name now behaves consistently + with the other two APIs. Going forward, all the get character string + APIs in HDF5 will be modified/written in this manner, regarding the + length of a character string. + + Fixes GitHub #4447 + + - Fixed heap-buffer-overflow in h5dump + + h5dump aborted when provided with a malformed input file. The was because + the buffer size for checksum was smaller than H5_SIZEOF_CHKSUM, causing + an overflow while calculating the offset to the checksum in the buffer. + A check was added so H5F_get_checksums would fail appropriately in all + of its occurrences. + + Fixes GitHub #4434 + + - Fixed library to allow usage of page buffering feature for serial file + access with parallel builds of HDF5 + + When HDF5 is built with parallel support enabled, previously the library would + disallow any usage of page buffering, even if a file was not opened with + parallel access. The library now allows usage of page buffering for serial + file access with parallel builds of HDF5. 
Usage of page buffering is still + disabled for any form of parallel file access, even if only 1 MPI process + is used. + + - Fixed function H5Requal to actually compare the reference pointers + + Fixed an issue with H5Requal always returning true because the + function was only comparing the ref2_ptr to itself. + + - Fixed infinite loop closing library issue when h5dump with a user provided test file + + The library's metadata cache calls the "get_final_load_size" client callback + to find out the actual size of the object header. As the size obtained + exceeds the file's EOA, it throws an error but the object header structure + allocated through the client callback is not freed, causing the issue + described. + + (1) Free the structure allocated in the object header client callback after + saving the needed information in udata. (2) Deserialize the object header + prefix in the object header's "deserialize" callback regardless. + + Fixes GitHub #3790 + + + Java Library + ------------ + - + + + Configuration + ------------- + - Fixed usage issue with FindZLIB.cmake module + + When building HDF5 with CMake and relying on the FindZLIB.cmake module, + the Find module would correctly find the ZLIB library but not set an OUTPUT_NAME + on the target. Also, the target returned, ZLIB::ZLIB, was not in the ZLIB_LIBRARIES + variable. This caused issues when requesting the OUTPUT_NAME of the target in + the pkg-config settings. + + Similar to HDF5_USE_LIBAEC_STATIC, "Find static AEC library", option, we added + a new option, HDF5_USE_ZLIB_STATIC, "Find static zlib library". These options + allow a user to specify whether to use a static or shared version of the compression + library in a find_package call. + + - Corrected usage of FetchContent in the HDFLibMacros.cmake file. + + CMake version 3.30 changed the behavior of the FetchContent module to deprecate + the use of FetchContent_Populate() in favor of FetchContent_MakeAvailable(). Therefore, + the copying of HDF specialized CMakeLists.txt files to the dependent project's source + was implemented in the FetchContent_Declare() call. + + - Fixed/reverted an Autotools configure hack that causes problems on MacOS + + A sed line in configure.ac was added in the past to paper over some + problems with older versions of the Autotools that would add incorrect + linker flags. This used the -i option in a way that caused silent + errors on MacOS that did not break the build. + + The original fix for this problem (in 1.14.4) removed the sed line + entirely, but it turns out that the sed cleanup is still necessary + on some systems, where empty -l options will be added to the libtool + script. + + This sed line has been restored and reworked to not use -i. + + Fixes GitHub issues #3843 and #4448 + + - Fixed a list index out of range issue in the runTest.cmake file + + Fixed an issue in config/cmake/runTest.cmake where the CMake logic + would try to access an invalid list index if the number of lines in + a test's output and reference files don't match. + + - Fix Autotools -Werror cleanup + + The Autotools temporarily scrub -Werror(=whatever) from CFLAGS, etc. + so configure checks don't trip over warnings generated by configure + check programs. The sed line originally only scrubbed -Werror but not + -Werror=something, which would cause errors when the '=something' was + left behind in CFLAGS. + + The sed line has been updated to handle -Werror=something lines. 
+ + Fixes one issue raised in #3872 + + - Changed default of 'Error on HDF5 doxygen warnings' DOXYGEN_WARN_AS_ERROR option. + + The default setting of DOXYGEN_WARN_AS_ERROR to 'FAIL_ON_WARNINGS' has been changed + to 'NO'. It was decided that the setting was too aggressive and should be a user choice. + The github actions and scripts have been updated to reflect this. + + * HDF5_ENABLE_DOXY_WARNINGS: ON/OFF (Default: OFF) + * --enable-doxygen-errors: enable/disable (Default: disable) + + + Tools + ----- + - Fixed several issues in ph5diff + + The parallel logic for the ph5diff tool inside the shared h5diff code was + refactored and cleaned up to fix several issues with the ph5diff tool. This + fixed: + + - several concurrency issues in ph5diff that can result in interleaved + output, + - an issue where output can sometimes be dropped when it ends up in + ph5diff's output overflow file, and + - an issue where MPI_Init was called after HDF5 had been initialized, + preventing the library from setting up an MPI communicator attribute + to perform library cleanup on MPI_Finalize. + + + Performance + ------------- + - + + + Fortran API + ----------- + - + + + High-Level Library + ------------------ + - + + + Fortran High-Level APIs + ----------------------- + - + + + Documentation + ------------- + - + + + F90 APIs + -------- + - + + + C++ APIs + -------- + - + + + Testing + ------- + - + + +Platforms Tested +=================== + + - HDF5 is tested with the two latest macOS versions that are available + on github runners. As new major macOS versions become available, HDF5 + will discontinue support for the older version and add the new latest + version to its list of compatible systems, along with the previous + version. + + Linux 6.8.0-1010-aws GNU gcc, gfortran, g++ + #10-Ubuntu SMP 2024 x86_64 (Ubuntu 13.2.0-23ubuntu4) 13.2.0 + GNU/Linux Ubuntu 24.04 Ubuntu clang version 18.1.3 (1ubuntu1) + Intel(R) oneAPI DPC++/C++ Compiler 2024.2.0 + ifx (IFX) 2024.2.0 20240602 + (cmake and autotools) + + Linux 6.5.0-1018-aws GNU gcc, gfortran, g++ + #18-Ubuntu SMP x86_64 GNU/Linux (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 + Ubuntu 22.04 Ubuntu clang version 14.0.0-1ubuntu1 + Intel(R) oneAPI DPC++/C++ Compiler 2024.0.2 + ifx (IFX) 2024.0.2 20231213 + (cmake and autotools) + + Linux 5.14.21-cray_shasta_c cray-mpich/8.1.28 + #1 SMP x86_64 GNU/Linux cce/15.0.0 + (frontier) gcc/13.2 + (cmake) + + Linux 5.14.0-427.24.1.el9_4 GNU gcc, gfortran, g++ (Red Hat 11.4.1-3) + #1 SMP x86_64 GNU/Linux clang version 17.0.6 + Rocky 9 Intel(R) oneAPI DPC++/C++ Compiler 2024.2.0 + ifx (IFX) 2024.2.0 + (cmake and autotools) + + Linux-4.18.0-553.16.1.1toss.t4 openmpi/4.1.2 + #1 SMP x86_64 GNU/Linux clang 14.0.6 + (corona, dane) GCC 12.1.1 + Intel(R) oneAPI DPC++/C++ Compiler 2023.2.1 + ifx (IFX) 2023.2.1 + + Linux-4.18.0-553.5.1.1toss.t4 openmpi/4.1/4.1.6 + #1 SMP x86_64 GNU/Linux clang 16.0.6 + (eclipse) GCC 12.3.0 + Intel(R) oneAPI DPC++/C++ Compiler 2024.0.2 + ifx (IFX) 2024.0.2 + (cmake) + + Linux 4.14.0-115.35.1.3chaos spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 17.0.6 + (vortex) GCC 12.2.1 + nvhpc 24.1 + XL 2023.06.28 + (cmake) + + Linux-4.14.0-115.35.1 spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 14.0.5, 15.0.6 + (lassen) GCC 8.3.1 + XL 2021.09.22, 2022.08.05 + (cmake) + + Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + 
+ Linux 3.10.0-1160.80.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4) + (jelly/kituo/moohan) Version 4.9.3, Version 7.2.0, Version 8.3.0, + Version 9.1.0, Version 10.2.0 + Intel(R) C (icc), C++ (icpc), Fortran (icc) + compilers: + Version 17.0.0.098 Build 20160721 + GNU C (gcc) and C++ (g++) 4.8.5 compilers + with NAG Fortran Compiler Release 7.1(Hanzomon) + Intel(R) C (icc) and C++ (icpc) 17.0.0.098 compilers + with NAG Fortran Compiler Release 7.1(Hanzomon) + MPICH 3.1.4 compiled with GCC 4.9.3 + MPICH 3.3 compiled with GCC 7.2.0 + OpenMPI 3.1.3 compiled with GCC 7.2.0 and 4.1.2 + compiled with GCC 9.1.0 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Versions 18.4.0 and 19.10-0 + NVIDIA nvc, nvfortran and nvc++ version 22.5-0 + (autotools and cmake) + + + Linux-3.10.0-1160.119.1.1chaos openmpi/4.1.4 + #1 SMP x86_64 GNU/Linux clang 16.0.6 + (skybridge) Intel(R) oneAPI DPC++/C++ Compiler 2023.2.0 + ifx (IFX) 2023.2.0 + (cmake) + + Linux-3.10.0-1160.90.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux clang 16.0.6 + (attaway) GCC 12.1.0 + Intel(R) oneAPI DPC++/C++ Compiler 2024.0.2 + ifx (IFX) 2024.0.2 + (cmake) + + Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos6 Version 4.4.7 20120313 + (platypus) Version 4.9.3, 5.3.0, 6.2.0 + MPICH 3.1.4 compiled with GCC 4.9.3 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + + Windows 10 x64 Visual Studio 2019 w/ clang 12.0.0 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2019 w/ Intel (C/C++ only - cmake) + Visual Studio 2022 w/ clang 17.0.3 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2022 w/ Intel C/C++ oneAPI 2023 (cmake) + Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) + + +Known Problems +============== + + - When building with the NAG Fortran compiler using the Autotools and libtool + 2.4.2 or earlier, the -shared flag will be missing '-Wl,', which will cause + compilation to fail. This is due to a bug in libtool that was fixed in 2012 + and released in 2.4.4 in 2014. + + - When the library detects and builds in support for the _Float16 datatype, an + issue has been observed on at least one MacOS 14 system where the library + fails to initialize due to not being able to detect the byte order of the + _Float16 type (https://github.com/HDFGroup/hdf5/issues/4310): + + #5: H5Tinit_float.c line 308 in H5T__fix_order(): failed to detect byte order + major: Datatype + minor: Unable to initialize object + + If this issue is encountered, support for the _Float16 type can be disabled + with a configuration option: + + CMake: HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16=OFF + Autotools: --disable-nonstandard-feature-float16 + + - When HDF5 is compiled with NVHPC versions 23.5 - 23.9 (additional versions may + also be applicable) and with -O2 (or higher) and -DNDEBUG, test failures occur + in the following tests: + + H5PLUGIN-filter_plugin + H5TEST-flush2 + H5TEST-testhdf5-base + MPI_TEST_t_filters_parallel + + Sporadic failures (even with lower -O levels): + Java JUnit-TestH5Pfapl + Java JUnit-TestH5D + + Also, NVHPC will fail to compile the test/tselect.c test file with a compiler + error of 'use of undefined value' when the optimization level is -O2 or higher. + + This is confirmed to be a bug in the nvc compiler that has been fixed as of + 23.11. 
If you are using an affected version of the NVidia compiler, the + work-around is to set the optimization level to -O1. + + https://forums.developer.nvidia.com/t/hdf5-no-longer-compiles-with-nv-23-9/269045 + + - CMake files do not behave correctly with paths containing spaces. + Do not use spaces in paths because the required escaping for handling spaces + results in very complex and fragile build files. + + - At present, metadata cache images may not be generated by parallel + applications. Parallel applications can read files with metadata cache + images, but since this is a collective operation, a deadlock is possible + if one or more processes do not participate. + + - The subsetting option in ph5diff currently will fail and should be avoided. + The subsetting option works correctly in serial h5diff. + + - Flang Fortran compilation will fail (last check version 17) due to not yet + implemented: (1) derived type argument passed by value (H5VLff.F90), + and (2) support for REAL with KIND = 2 in intrinsic SPACING used in testing. + + - Fortran tests HDF5_1_8.F90 and HDF5_F03.F90 will fail with Cray compilers + greater than version 16.0 due to a compiler bug. The latest version verified + as failing was version 17.0. + + - Several tests currently fail on certain platforms: + MPI_TEST-t_bigio fails with spectrum-mpi on ppc64le platforms. + + MPI_TEST-t_subfiling_vfd and MPI_TEST_EXAMPLES-ph5_subfiling fail with + cray-mpich on theta and with XL compilers on ppc64le platforms. + + MPI_TEST_testphdf5_tldsc fails with cray-mpich 7.7 on cori and theta. + + - File space may not be released when overwriting or deleting certain nested + variable length or reference types. + + - Known problems in previous releases can be found in the HISTORY*.txt files + in the HDF5 source. Please report any new problems found to + help@hdfgroup.org. + + +CMake vs. Autotools installations +================================= +While both build systems produce similar results, there are differences. +Each system produces the same set of folders on Linux (only CMake works +on standard Windows); bin, include, lib and share. Autotools places the +COPYING and RELEASE.txt file in the root folder, CMake places them in +the share folder. + +The bin folder contains the tools and the build scripts. Additionally, CMake +creates dynamic versions of the tools with the suffix "-shared". Autotools +installs one set of tools depending on the "--enable-shared" configuration +option. + build scripts + ------------- + Autotools: h5c++, h5cc, h5fc + CMake: h5c++, h5cc, h5hlc++, h5hlcc + +The include folder holds the header files and the fortran mod files. CMake +places the fortran mod files into separate shared and static subfolders, +while Autotools places one set of mod files into the include folder. Because +CMake produces a tools library, the header files for tools will appear in +the include folder. + +The lib folder contains the library files, and CMake adds the pkgconfig +subfolder with the hdf5*.pc files used by the bin/build scripts created by +the CMake build. CMake separates the C interface code from the fortran code by +creating C-stub libraries for each Fortran library. In addition, only CMake +installs the tools library. The names of the szip libraries are different +between the build systems. + +The share folder will have the most differences because CMake builds include +a number of CMake specific files for support of CMake's find_package and support +for the HDF5 Examples CMake project. 
+ +The issues with the gif tool are: + HDFFV-10592 CVE-2018-17433 + HDFFV-10593 CVE-2018-17436 + HDFFV-11048 CVE-2020-10809 +These CVE issues have not yet been addressed and are avoided by not building +the gif tool by default. Enable building the High-Level tools with these options: + autotools: --enable-hlgiftools + cmake: HDF5_BUILD_HL_GIF_TOOLS=ON + + +%%%%1.14.4%%%% + +HDF5 version 1.14.4-2 released on 2024-04-15 +================================================================================ + + +INTRODUCTION +============ + +This document describes the differences between this release and the previous +HDF5 release. It contains information on the platforms tested and known +problems in this release. For more details check the HISTORY*.txt files in the +HDF5 source. + +Note that documentation in the links below will be updated at the time of each +final release. + +Links to HDF5 documentation can be found on: + + https://portal.hdfgroup.org/documentation/ + +The official HDF5 releases can be obtained from: + + https://www.hdfgroup.org/downloads/hdf5/ + +Changes from release to release and new features in the HDF5-1.14.x release series +can be found at: + + https://portal.hdfgroup.org/documentation/hdf5-docs/release_specific_info.html + +If you have any questions or comments, please send them to the HDF Help Desk: + + help@hdfgroup.org + + +CONTENTS +======== + +- New Features +- Support for new platforms and languages +- Bug Fixes since HDF5-1.14.3 +- Platforms Tested +- Known Problems +- CMake vs. Autotools installations + + +New Features +============ + + Configuration: + ------------- + - Added configure options for enabling/disabling non-standard programming + language features + + * Added a new configuration option that allows enabling or disabling of + support for features that are extensions to programming languages, such + as support for the _Float16 datatype: + + CMake: HDF5_ENABLE_NONSTANDARD_FEATURES (ON/OFF) (Default: ON) + Autotools: --enable-nonstandard-features (yes/no) (Default: yes) + + When this option is enabled, configure time checks are still performed + to ensure that a feature can be used properly, but these checks may not + be sufficient when compiler support for a feature is incomplete or broken, + resulting in library build failures. When set to OFF/no, this option + provides a way to disable support for all non-standard features to avoid + these issues. Individual features can still be re-enabled with their + respective configuration options. + + * Added a new configuration option that allows enabling or disabling of + support for the _Float16 C datatype: + + CMake: HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16 (ON/OFF) (Default: ON) + Autotools: --enable-nonstandard-feature-float16 (yes/no) (Default: yes) + + While support for the _Float16 C datatype can generally be detected and + used properly, some compilers have incomplete support for the datatype + and will pass configure time checks while still failing to build HDF5. + This option provides a way to disable support for the _Float16 datatype + when the compiler doesn't have the proper support for it. + + - Deprecate bin/cmakehdf5 script + + With the improvements made in CMake since version 3.23 and the addition + of CMake preset files, this script is no longer necessary. 
+ + See INSTALL_CMake.txt file, Section X: Using CMakePresets.json for compiling + + - Overhauled LFS support checks + + In 2024, we can assume that Large File Support (LFS) exists on all + systems we support, though it may require flags to enable it, + particularly when building 32-bit binaries. The HDF5 source does + not use any of the 64-bit specific API calls (e.g., ftello64) + or explicit 64-bit offsets via off64_t. + + Autotools + + * We now use AC_SYS_LARGEFILE to determine how to support LFS. We + previously used a custom m4 script for this. + + CMake + + * The HDF_ENABLE_LARGE_FILE option (advanced) has been removed + * We no longer run a test program to determine if LFS works, which + will help with cross-compiling + * On Linux we now unilaterally set -D_LARGEFILE_SOURCE and + -D_FILE_OFFSET_BITS=64, regardless of 32/64 bit system. CMake + doesn't offer a nice equivalent to AC_SYS_LARGEFILE and since + those options do nothing on 64-bit systems, this seems safe and + covers all our bases. We don't set -D_LARGEFILE64_SOURCE since + we don't use any of the POSIX 64-bit specific API calls like + ftello64, as noted above. + * We didn't test for LFS support on non-Linux platforms. We've added + comments for how LFS should probably be supported on AIX and Solaris, + which seem to be alive, though uncommon. PRs would be appreciated if + anyone wishes to test this. + + This overhaul also fixes GitHub #2395, which points out that the LFS flags + used when building with CMake differ based on whether CMake has been + run before. The LFS check program that caused this problem no longer exists. + + - The CMake HDF5_ENABLE_DEBUG_H5B option has been removed + + This enabled some additional version-1 B-tree checks. These have been + removed so the option is no longer necessary. + + This option was CMake-only and marked as advanced. + + - New option for building with static CRT in Windows + + The following option has been added: + HDF5_BUILD_STATIC_CRT_LIBS "Build With Static Windows CRT Libraries" OFF + Because our minimum CMake is 3.18, the macro to change runtime flags no longer + works as CMake changed the default behavior in CMake 3.15. + + Fixes GitHub issue #3984 + + - Added support for the new MSVC preprocessor + + Microsoft added support for a new, standards-conformant preprocessor + to MSVC, which can be enabled with the /Zc:preprocessor option. This + preprocessor would trip over our HDopen() variadic function-like + macro, which uses a feature that only works with the legacy preprocessor. + + ifdefs have been added that select the correct HDopen() form and + allow building HDF5 with the /Zc:preprocessor option. + + The HDopen() macro is located in an internal header file and only + affects building the HDF5 library from source. + + Fixes GitHub #2515 + + - Renamed HDF5_ENABLE_USING_MEMCHECKER to HDF5_USING_ANALYSIS_TOOL + + The HDF5_USING_ANALYSIS_TOOL is used to indicate to test macros that + an analysis tool is being used and that the tests should not use + the runTest.cmake macros and it's variations. The analysis tools, + like valgrind, test the macro code instead of the program under test. + + HDF5_ENABLE_USING_MEMCHECKER is still used for controlling the HDF5 + define, H5_USING_MEMCHECKER. + + - New option for building and naming tools in CMake + + The following option has been added: + HDF5_BUILD_STATIC_TOOLS "Build Static Tools Not Shared Tools" OFF + + The default will build shared tools unless BUILD_SHARED_LIBS = OFF. 
+ Tools will no longer have "-shared" as only one set of tools will be created. + + - Incorporated HDF5 examples repository into HDF5 library. + + The HDF5Examples folder is equivalent to the hdf5-examples repository. + This enables building and testing the examples + during the library build process or after the library has been installed. + Previously, the hdf5-examples archives were downloaded + for packaging with the library. Now the examples can be built + and tested without a packaged install of the library. + + However, to maintain the ability to use the HDF5Examples with an installed + library, it is necessary to map the option names used by the library + to those used by the examples. The typical pattern is: + = + HDF_BUILD_FORTRAN = ${HDF5_BUILD_FORTRAN} + + - Added new option for CMake to mark tests as SKIPPED. + + HDF5_DISABLE_TESTS_REGEX is a REGEX string that will be checked with + test names and if there is a match then that test's property will be + set to DISABLED. HDF5_DISABLE_TESTS_REGEX can be initialized on the + command line: "-DHDF5_DISABLE_TESTS_REGEX:STRING=" + See CMake documentation for regex-specification. + + - Added defaults to CMake for long double conversion checks + + HDF5 performs a couple of checks at build time to see if long double + values can be converted correctly (IBM's Power architecture uses a + special format for long doubles). These checks were performed using + TRY_RUN, which is a problem when cross-compiling. + + These checks now use default values appropriate for most non-Power + systems when cross-compiling. The cache values can be pre-set if + necessary, which will preempt both the TRY_RUN and the default. + + Affected values: + H5_LDOUBLE_TO_LONG_SPECIAL (default no) + H5_LONG_TO_LDOUBLE_SPECIAL (default no) + H5_LDOUBLE_TO_LLONG_ACCURATE (default yes) + H5_LLONG_TO_LDOUBLE_CORRECT (default yes) + H5_DISABLE_SOME_LDOUBLE_CONV (default no) + + Fixes GitHub #3585 + + + Library: + -------- + - Relaxed behavior of H5Pset_page_buffer_size() when opening files + + This API call sets the size of a file's page buffer cache. This call + was extremely strict about matching its parameters to the file strategy + and page size used to create the file, requiring a separate open of the + file to obtain these parameters. + + These requirements have been relaxed when using the fapl to open + a previously-created file: + + * When opening a file that does not use the H5F_FSPACE_STRATEGY_PAGE + strategy, the setting is ignored and the file will be opened, but + without a page buffer cache. This was previously an error. + + * When opening a file that has a page size larger than the desired + page buffer cache size, the page buffer cache size will be increased + to the file's page size. This was previously an error. + + The behavior when creating a file using H5Pset_page_buffer_size() is + unchanged. + + Fixes GitHub issue #3382 + + - Added support for _Float16 16-bit half-precision floating-point datatype + + Support for the _Float16 C datatype has been added on platforms where: + + - The _Float16 datatype and its associated macros (FLT16_MIN, FLT16_MAX, + FLT16_EPSILON, etc.) are available + - A simple test program that converts between the _Float16 datatype and + other datatypes with casts can be successfully compiled and run at + configure time. 
Some compilers appear to be buggy or feature-incomplete + in this regard and will generate calls to compiler-internal functions + for converting between the _Float16 datatype and other datatypes, but + will not link these functions into the build, resulting in build + failures. + + The following new macros have been added: + + H5_HAVE__FLOAT16 - This macro is defined in H5pubconf.h and will have + the value 1 if support for the _Float16 datatype is + available. It will not be defined otherwise. + + H5_SIZEOF__FLOAT16 - This macro is defined in H5pubconf.h and will have + a value corresponding to the size of the _Float16 + datatype, as computed by sizeof(). It will have the + value 0 if support for the _Float16 datatype is not + available. + + H5_HAVE_FABSF16 - This macro is defined in H5pubconf.h and will have the + value 1 if the fabsf16 function is available for use. + + H5_LDOUBLE_TO_FLOAT16_CORRECT - This macro is defined in H5pubconf.h and + will have the value 1 if the platform can + correctly convert long double values to + _Float16. Some compilers have issues with + this. + + H5T_NATIVE_FLOAT16 - This macro maps to the ID of an HDF5 datatype representing + the native C _Float16 datatype for the platform. If + support for the _Float16 datatype is not available, the + macro will map to H5I_INVALID_HID and should not be used. + + H5T_IEEE_F16BE - This macro maps to the ID of an HDF5 datatype representing + a big-endian IEEE 754 16-bit floating-point datatype. This + datatype is available regardless of whether _Float16 support + is available or not. + + H5T_IEEE_F16LE - This macro maps to the ID of an HDF5 datatype representing + a little-endian IEEE 754 16-bit floating-point datatype. + This datatype is available regardless of whether _Float16 + support is available or not. + + The following new hard datatype conversion paths have been added, but + will only be used when _Float16 support is available: + + H5T_NATIVE_SCHAR <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_UCHAR <-> H5T_NATIVE_FLOAT16 + H5T_NATIVE_SHORT <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_USHORT <-> H5T_NATIVE_FLOAT16 + H5T_NATIVE_INT <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_UINT <-> H5T_NATIVE_FLOAT16 + H5T_NATIVE_LONG <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_ULONG <-> H5T_NATIVE_FLOAT16 + H5T_NATIVE_LLONG <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_ULLONG <-> H5T_NATIVE_FLOAT16 + H5T_NATIVE_FLOAT <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_DOUBLE <-> H5T_NATIVE_FLOAT16 + H5T_NATIVE_LDOUBLE <-> H5T_NATIVE_FLOAT16 + + The H5T_NATIVE_LDOUBLE -> H5T_NATIVE_FLOAT16 hard conversion path will only + be available and used if H5_LDOUBLE_TO_FLOAT16_CORRECT has a value of 1. Otherwise, + the conversion will be emulated in software by the library. + + Note that in the absence of any compiler flags for architecture-specific + tuning, the generated code for datatype conversions with the _Float16 type + may perform conversions by first promoting the type to float. Use of + architecture-specific tuning compiler flags may instead allow for the + generation of specialized instructions, such as AVX512-FP16 instructions, + if available. + + - Made several improvements to the datatype conversion code + + * The datatype conversion code was refactored to use pointers to + H5T_t datatype structures internally rather than IDs wrapping + the pointers to those structures. These IDs are needed if an + application-registered conversion function or conversion exception + function are involved during the conversion process. 
For simplicity, + the conversion code simply passed these IDs down and let the internal + code unwrap the IDs as necessary when needing to access the wrapped + H5T_t structures. However, this could cause a significant amount of + repeated ID lookups for compound datatypes and other container-like + datatypes. The code now passes down pointers to the datatype + structures and only creates IDs to wrap those pointers as necessary. + Quick testing showed an average ~3x to ~10x improvement in performance + of conversions on container-like datatypes, depending on the + complexity of the datatype. + + * A conversion "context" structure was added to hold information about + the current conversion being performed. This allows conversions on + container-like datatypes to be optimized better by skipping certain + portions of the conversion process that remain relatively constant + when multiple elements of the container-like datatype are being + converted. + + * After refactoring the datatype conversion code to use pointers + internally rather than IDs, several copies of datatypes that were + made by higher levels of the library were able to be removed. The + internal IDs that were previously registered to wrap those copied + datatypes were also able to be removed. + + - Implemented optimized support for vector I/O in the Subfiling VFD + + Previously, the Subfiling VFD would handle vector I/O requests by + breaking them down into individual I/O requests, one for each entry + in the I/O vectors provided. This could result in poor I/O performance + for features in HDF5 that utilize vector I/O, such as parallel I/O + to filtered datasets. The Subfiling VFD now properly handles vector + I/O requests in their entirety, resulting in fewer I/O calls, improved + vector I/O performance and improved vector I/O memory efficiency. + + - Added support for in-place type conversion in most cases + + In-place type conversion allows the library to perform type conversion + without an intermediate type conversion buffer. This can improve + performance by allowing I/O in a single operation over the entire + selection instead of being limited by the size of the intermediate buffer. + Implemented for I/O on contiguous and chunked datasets when the selection + is contiguous in memory and when the memory datatype is not smaller than + the file datatype. + + - Changed selection I/O to be on by default when using the MPIO file driver + + - Added support for selection I/O in the MPIO file driver + + Previously, only vector I/O operations were supported. Support for + selection I/O should improve performance and reduce memory uses in some + cases. + + - Changed the error handling for a not found path in the find plugin process. + + While attempting to load a plugin the HDF5 library will fail if one of the + directories in the plugin paths does not exist, even if there are more paths + to check. Instead of exiting the function with an error, just logged the error + and continue processing the list of paths to check. + + - Implemented support for temporary security credentials for the Read-Only + S3 (ROS3) file driver. + + When using temporary security credentials, one also needs to specify a + session/security token next to the access key id and secret access key. + This token can be specified by the new API function H5Pset_fapl_ros3_token(). + The API function H5Pget_fapl_ros3_token() can be used to retrieve + the currently set token. 
+ + - Added a Subfiling VFD configuration file prefix environment variable + + The Subfiling VFD now checks for values set in a new environment + variable "H5FD_SUBFILING_CONFIG_FILE_PREFIX" to determine if the + application has specified a pathname prefix to apply to the file + path for its configuration file. For example, this can be useful + for cases where the application wishes to write subfiles to a + machine's node-local storage while placing the subfiling configuration + file on a file system readable by all machine nodes. + + - Added H5Pset_selection_io(), H5Pget_selection_io(), and + H5Pget_no_selection_io_cause() API functions to manage the selection I/O + feature. This can be used to enable collective I/O with type conversion, + or it can be used with custom VFDs that support vector or selection I/O. + + - Added H5Pset_modify_write_buf() and H5Pget_modify_write_buf() API + functions to allow the library to modify the contents of write buffers, in + order to avoid malloc/memcpy. Currently only used for type conversion + with selection I/O. + + + Parallel Library: + ----------------- + - + + + Fortran Library: + ---------------- + - Added Fortran H5E APIs: + h5eregister_class_f, h5eunregister_class_f, h5ecreate_msg_f, h5eclose_msg_f + h5eget_msg_f, h5epush_f, h5eget_num_f, h5ewalk_f, h5eget_class_name_f, + h5eappend_stack_f, h5eget_current_stack_f, h5eset_current_stack_f, h5ecreate_stack_f, + h5eclose_stack_f, h5epop_f, h5eprint_f (C h5eprint v2 signature) + + - Added API support for Fortran MPI_F08 module definitions: + Adds support for MPI's MPI_F08 module datatypes: type(MPI_COMM) and type(MPI_INFO) for HDF5 APIs: + H5PSET_FAPL_MPIO_F, H5PGET_FAPL_MPIO_F, H5PSET_MPI_PARAMS_F, H5PGET_MPI_PARAMS_F + Ref. #3951 + + - Added Fortran APIs: + H5FGET_INTENT_F, H5SSEL_ITER_CREATE_F, H5SSEL_ITER_GET_SEQ_LIST_F, + H5SSEL_ITER_CLOSE_F, H5S_mp_H5SSEL_ITER_RESET_F + + - Added Fortran Parameters: + H5S_SEL_ITER_GET_SEQ_LIST_SORTED_F, H5S_SEL_ITER_SHARE_WITH_DATASPACE_F + + - Added Fortran Parameters: + H5S_BLOCK_F and H5S_PLIST_F + + - The configuration definitions file, H5config_f.inc, is now installed + and the HDF5 version number has been added to it. + + - Added Fortran APIs: + h5fdelete_f + + - Added Fortran APIs: + h5vlnative_addr_to_token_f and h5vlnative_token_to_address_f + + + C++ Library: + ------------ + - + + + Java Library: + ------------- + - + + + Tools: + ------ + - + + + High-Level APIs: + ---------------- + - + + + C Packet Table API: + ------------------- + - + + + Internal header file: + --------------------- + - + + + Documentation: + -------------- + - + + +Support for new platforms, languages and compilers +================================================== + - + + +Bug Fixes since HDF5-1.14.3 release +=================================== + Configuration: + ------------- + - Fix Autotools -Werror cleanup + + The Autotools temporarily scrub -Werror(=whatever) from CFLAGS, etc. + so configure checks don't trip over warnings generated by configure + check programs. The sed line originally only scrubbed -Werror but not + -Werror=something, which would cause errors when the '=something' was + left behind in CFLAGS. + + The sed line has been updated to handle -Werror=something lines. + + Fixes one issue raised in #3872 + + Library + ------- + - Fixed a leak of datatype IDs created internally during datatype conversion + + Fixed an issue where the library could leak IDs that it creates internally + for compound datatype members during datatype conversion. 
When the library's + table of datatype conversion functions is modified (such as when a new + conversion function is registered with the library from within an application), + the compound datatype conversion function has to recalculate data that it + has cached. When recalculating that data, the library was registering new + IDs for each of the members of the source and destination compound datatypes + involved in the conversion process and was overwriting the old cached IDs + without first closing them. This would result in use-after-free issues due + to multiple IDs pointing to the same internal H5T_t structure, as well as + crashes due to the library not gracefully handling partially initialized or + partially freed datatypes on library termination. + + Fixes h5py GitHub #2419 + + - Fixed many (future) CVE issues + + A partner organization corrected many potential security issues, which + were fixed and reported to us before submission to MITRE. These do + not have formal CVE issues assigned to them yet, so the numbers assigned + here are just placeholders. We will update the HDF5 1.14 CVE list (link + below) when official MITRE CVE tracking numbers are assigned. + + These CVE issues are generally of the same form as other reported HDF5 + CVE issues, and rely on the library failing while attempting to read + a malformed file. Most of them cause the library to segfault and will + probably be assigned "medium (~5/10)" scores by NIST, like the other + HDF5 CVE issues. + + The issues that were reported to us have all been fixed in this release, + so HDF5 will continue to have no unfixed public CVE issues. + + NOTE: HDF5 versions earlier than 1.14.4 should be considered vulnerable + to these issues and users should upgrade to 1.14.4 as soon as + possible. Note that it's possible to build the 1.14 library with + HDF5 1.8, 1.10, etc. API bindings for people who wish to enjoy + the benefits of a more secure library but don't want to upgrade + to the latest API. We will not be bringing the CVE fixes to earlier + versions of the library (they are no longer supported). 
+ + LIST OF CVE ISSUES FIXED IN THIS RELEASE: + + * CVE-2024-0116-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5D__scatter_mem resulting in causing denial of service or potential + code execution + + * CVE-2024-0112-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5S__point_deserialize resulting in the corruption of the + instruction pointer and causing denial of service or potential code + execution + + * CVE-2024-0111-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5T__conv_struct_opt resulting in causing denial of service or + potential code execution + + * CVE-2023-1208-002 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5O__mtime_new_encode resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1208-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5O__layout_encode resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1207-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5O__dtype_encode_helper causing denial of service or potential + code execution + + * CVE-2023-1205-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5VM_array_fill resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1202-002 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5T__get_native_type resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1202-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5T__ref_mem_setnull resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1130-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5T_copy_reopen resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1125-001 + HDF5 versions <= 1.14.3 contain a heap buffer overflow in + H5Z__nbit_decompress_one_byte caused by the earlier use of an + initialized pointer. 
This may result in Denial of Service or + potential code execution + + * CVE-2023-1114-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5HG_read resulting in the corruption of the instruction pointer + and causing denial of service or potential code execution + + * CVE-2023-1113-002 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5F_addr_decode_len resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1113-001 + HDF5 versions <= 1.14.3 contain a heap buffer overflow caused by + the unsafe use of strdup in H5MM_xstrdup, resulting in denial of + service or potential code execution + + * CVE-2023-1108-001 + HDF5 versions <= 1.14.3 contain a out-of-bounds read operation in + H5FL_arr_malloc resulting in denial of service or potential code + execution + + * CVE-2023-1104-004 + HDF5 versions <= 1.14.3 contain a out-of-bounds read operation in + H5T_close_real resulting in denial of service or potential code + execution + + * CVE-2023-1104-003 + HDF5 library versions <=1.14.3 contain a heap buffer overflow flaw + in the function H5HL__fl_deserialize resulting in denial of service + or potential code execution + + * CVE-2023-1104-002 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5HL__fl_deserialize resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1104-001 + HDF5 library versions <=1.14.3 contains a stack overflow in the + function H5E_printf_stack resulting in denial of service or + potential code execution + + * CVE-2023-1023-001 + HDF5 library versions <=1.14.3 heap buffer overflow in + H5VM_memcpyvv which may result in denial of service or code + execution + + * CVE-2023-1019-001 + HDF5 library versions <=1.14.3 contain a stack buffer overflow in + H5VM_memcpyvv resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1018-001 + HDF5 library versions <=1.14.3 contain a memory corruption in + H5A__close resulting in the corruption of the instruction pointer + and causing denial of service or potential code execution + + * CVE-2023-1017-002 + HDF5 library versions <=1.14.3 may use an uninitialized value + H5A__attr_release_table resulting in denial of service + + * CVE-2023-1017-001 + HDF5 library versions <=1.14.3 may attempt to dereference + uninitialized values in h5tools_str_sprint, which will lead to + denial of service + + * CVE-2023-1013-004 + HDF5 versions <= 1.13.3 contain a stack buffer overflow in + H5HG_read resulting in denial of service or potential code + execution + + * CVE-2023-1013-003 + HDF5 library versions <=1.14.3 contain a buffer overrun in + H5Z__filter_fletcher32 resulting in the corruption of the + instruction pointer and causing denial of service or potential + code execution + + * CVE-2023-1013-002 + HDF5 library versions <=1.14.3 contain a buffer overrun in + H5O__linfo_decode resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1013-001 + HDF5 library versions <=1.14.3 contain a buffer overrun in + H5Z__filter_scaleoffset resulting in the corruption of the + instruction pointer and causing denial of service or potential + code execution + + * CVE-2023-1012-001 + HDF5 library versions <=1.14.3 contain a stack buffer overflow in + H5R__decode_heap resulting in the corruption of the instruction + 
pointer and causing denial of service or potential code execution + + * CVE-2023-1010-001 + HDF5 library versions <=1.14.3 contain a stack buffer overflow in + H5FL_arr_malloc resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1009-001 + HDF5 library versions <=1.14.3 contain a stack buffer overflow in + H5FL_arr_malloc resulting in the corruption of the instruction + pointer and causing denial of service or potential code execution + + * CVE-2023-1006-004 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5A__attr_release_table resulting in the corruption of the + instruction pointer and causing denial of service or potential code + execution + + * CVE-2023-1006-003 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5T__bit_find resulting in the corruption of the instruction pointer + and causing denial of service or potential code execution. + + * CVE-2023-1006-002 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5HG_read resulting in the corruption of the instruction pointer + and causing denial of service or potential code execution + + * CVE-2023-1006-001 + HDF5 library versions <=1.14.3 contain a heap buffer overflow in + H5HG__cache_heap_deserialize resulting in the corruption of the + instruction pointer and causing denial of service or potential code + execution + + FULL OFFICIAL HDF5 CVE list (from mitre.org): + + https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=HDF5 + + 1.14.x CVE tracking list: + + https://github.com/HDFGroup/hdf5/blob/hdf5_1_14/CVE_list_1_14.md + + HDF5 CVE regression test suite (includes proof-of-concept files): + + https://github.com/HDFGroup/cve_hdf5 + + - Fixed a divide-by-zero issue when a corrupt file sets the page size to 0 + + If a corrupt file sets the page buffer size in the superblock to zero, + the library could attempt to divide by zero when allocating space in + the file. The library now checks for valid page buffer sizes when + reading the superblock message. + + Fixes oss-fuzz issue 58762 + + - Fixed a bug when using array datatypes with certain parent types + + Array datatype conversion would never use a background buffer, even if the + array's parent type (what the array is an array of) required a background + buffer for conversion. This resulted in crashes in some cases when using + an array of compound, variable length, or reference datatypes. Array types + now use a background buffer if needed by the parent type. + + - Fixed potential buffer read overflows in H5PB_read + + H5PB_read previously did not account for the fact that the size of the + read it's performing could overflow the page buffer pointer, depending + on the calculated offset for the read. This has been fixed by adjusting + the size of the read if it's determined that it would overflow the page. + + - Fixed CVE-2017-17507 + + This CVE was previously declared fixed, but later testing with a static + build of HDF5 showed that it was not fixed. + + When parsing a malformed (fuzzed) compound type containing variable-length + string members, the library could produce a segmentation fault, crashing + the library. + + This was fixed after GitHub PR #4234 + + Fixes GitHub issue #3446 + + - Fixed a cache assert with very large metadata objects + + If the library tries to load a metadata object that is above a + certain size, this would trip an assert in debug builds. 
This could + happen if you create a very large number of links in an old-style + group that uses local heaps. + + There is no need for this assert. The library's metadata cache + can handle large objects. The assert has been removed. + + Fixes GitHub #3762 + + - Fixed an issue with the Subfiling VFD and multiple opens of a + file + + An issue with the way the Subfiling VFD handles multiple opens + of the same file caused the file structures for the extra opens + to occasionally get mapped to an incorrect subfiling context + object. The VFD now correctly maps the file structures for + additional opens of an already open file to the same context + object. + + - Fixed a bug that causes the library to incorrectly identify + the endian-ness of 16-bit and smaller C floating-point datatypes + + When detecting the endian-ness of an in-memory C floating-point + datatype, the library previously always assumed that the type + was at least 32 bits in size. This resulted in invalid memory + accesses and would usually cause the library to identify the + datatype as having an endian-ness of H5T_ORDER_VAX. This has + now been fixed. + + - Fixed a bug that causes an invalid memory access issue when + converting 16-bit floating-point values to integers with the + library's software conversion function + + The H5T__conv_f_i function previously always assumed that + floating-point values were at least 32 bits in size and would + access invalid memory when attempting to convert 16-bit + floating-point values to integers. To fix this, parts of the + H5T__conv_f_i function had to be rewritten, which also resulted + in a significant speedup when converting floating-point values + to integers where the library does not have a hard conversion + path. This is the case for any floating-point values with a + datatype not represented by H5T_NATIVE_FLOAT16 (if _Float16 is + supported), H5T_NATIVE_FLOAT, H5T_NATIVE_DOUBLE or + H5T_NATIVE_LDOUBLE. + + - Fixed a bug that can cause incorrect data when overflows occur + while converting integer values to floating-point values with + the library's software conversion function + + The H5T__conv_i_f function had a bug which previously caused it + to return incorrect data when an overflow occurs and an application's + conversion exception callback function decides not to handle the + overflow. Rather than return positive infinity, the library would + return truncated data. This has now been fixed. + + - Corrected H5Soffset_simple() when offset is NULL + + The reference manual states that the offset parameter of H5Soffset_simple() + can be set to NULL to reset the offset of a simple dataspace to 0. This + has never been true, and passing NULL was regarded as an error. + + The library will now accept NULL for the offset parameter and will + correctly set the offset to zero. + + Fixes HDFFV-9299 + + - Fixed an issue where the Subfiling VFD's context object cache could + grow too large + + The Subfiling VFD keeps a cache of its internal context objects to + speed up access to a context object for a particular file, as well + as access to that object across multiple opens of the same file. + However, opening a large amount of files with the Subfiling VFD over + the course of an application's lifetime could cause this cache to grow + too large and result in the application running out of available MPI + communicator objects. On file close, the Subfiling VFD now simply + evicts context objects out of its cache and frees them. 
It is assumed + that multiple opens of a file will be a less common use case for the + Subfiling VFD, but this can be revisited if it proves to be an issue + for performance. + + - Fixed error when overwriting certain nested variable length types + + Previously, when using a datatype that included a variable length type + within a compound or array within another variable length type, and + overwriting data with a shorter (top level) variable length sequence, an + error could occur. This has been fixed. + + - Take user block into account in H5Dchunk_iter() and H5Dget_chunk_info() + + The address reported by the following functions did not correctly + take the user block into account: + + * H5Dchunk_iter() <-- addr passed to callback + * H5Dget_chunk_info() <-- addr parameter + * H5Dget_chunk_info_by_coord() <-- addr parameter + + This means that these functions reported logical HDF5 file addresses, + which would only be equal to the physical addresses when there is no + user block prepended to the HDF5 file. This is unfortunate, as the + primary use of these functions is to get physical addresses in order + to directly access the chunks. + + The listed functions now correctly take the user block into account, + so they will emit physical addresses that can be used to directly + access the chunks. + + Fixes #3003 + + - Fixed asserts raised by large values of H5Pset_est_link_info() parameters + + If large values for est_num_entries and/or est_name_len were passed + to H5Pset_est_link_info(), the library would attempt to create an + object header NIL message to reserve enough space to hold the links in + compact form (i.e., concatenated), which could exceed allowable object + header message size limits and trip asserts in the library. + + This bug only occurred when using the HDF5 1.8 file format or later and + required the product of the two values to be ~64k more than the size + of any links written to the group, which would cause the library to + write out a too-large NIL spacer message to reserve the space for the + unwritten links. + + The library now inspects the phase change values to see if the dataset + is likely to be compact and checks the size to ensure any NIL spacer + messages won't be larger than the library allows. + + Fixes GitHub #1632 + + - Fixed a bug where H5Tset_fields does not account for any offset + set for a floating-point datatype when determining if values set + for spos, epos, esize, mpos and msize make sense for the datatype + + Previously, H5Tset_fields did not take datatype offsets into account + when determining if the values set make sense for the datatype. + This would cause the function to fail when the precision for a + datatype is correctly set such that the offset bits are not included. + This has now been fixed. + + - Fixed H5Fget_access_plist so that it returns the file locking + settings for a file + + When H5Fget_access_plist (and the internal H5F_get_access_plist) + is called on a file, the returned File Access Property List has + the library's default file locking settings rather than any + settings set for the file. This causes two problems: + + - Opening an HDF5 file through an external link using H5Gopen, + H5Dopen, etc. with H5P_DEFAULT for the Dataset/Group/etc. + Access Property List will cause the external file to be opened + with the library's default file locking settings rather than + inheriting them from the parent file. 
This can be surprising + when a file is opened with file locking disabled, but its + external files are opened with file locking enabled. + + - An application cannot make use of the H5Pset_elink_fapl + function to match file locking settings between an external + file and its parent file without knowing the correct setting + ahead of time, as calling H5Fget_access_plist on the parent + file will not return the correct settings. + + This has been fixed by copying a file's file locking settings + into the newly-created File Access Property List in H5F_get_access_plist. + + This fix partially addresses GitHub issue #4011 + + - Memory usage growth issue + + Starting with the HDF5 1.12.1 release, an issue (GitHub issue #1256) + was observed where running a simple program that has a loop of opening + a file, reading from an object with a variable-length datatype and + then closing the file would result in the process fairly quickly + running out of memory. Upon further investigation, it was determined + that this memory was being kept around in the library's datatype + conversion pathway cache that is used to speed up datatype conversions + which are repeatedly used within an HDF5 application's lifecycle. For + conversions involving variable-length or reference datatypes, each of + these cached pathway entries keeps a reference to its associated file + for later use. Since the file was being closed and reopened on each + loop iteration, and since the library compares for equality between + instances of opened files (rather than equality of the actual files) + when determining if it can reuse a cached conversion pathway, it was + determining that no cached conversion pathways could be reused and was + creating a new cache entry on each loop iteration during I/O. This + would lead to constant growth of that cache and the memory it consumed, + as well as constant growth of the memory consumed by each cached entry + for the reference to its associated file. + + To fix this issue, the library now removes any cached datatype + conversion path entries for variable-length or reference datatypes + associated with a particular file when that file is closed. + + Fixes GitHub #1256 + + - Suppressed floating-point exceptions in H5T init code + + The floating-point datatype initialization code in H5Tinit_float.c + could raise FE_INVALID exceptions while munging bits and performing + comparisons that might involve NaN. This was not a problem when the + initialization code was executed in H5detect at compile time (prior + to 1.14.3), but now that the code is executed at library startup + (1.14.3+), these exceptions can be caught by user code, as is the + default in the NAG Fortran compiler. + + Starting in 1.14.4, we now suppress floating-point exceptions while + initializing the floating-point types and clear FE_INVALID before + restoring the original environment. + + Fixes GitHub #3831 + + - Fixed a file handle leak in the core VFD + + When opening a file with the core VFD and a file image, if the file + already exists, the file check would leak the POSIX file handle. + + Fixes GitHub issue #635 + + - Dropped support for MPI-2 + + The MPI-2 supporting artifacts have been removed due to the cessation + of MPI-2 maintenance and testing since version HDF5 1.12. 
+ + + - Fixed a segfault when using a user-defined conversion function between compound datatypes + + During type info initialization for compound datatype conversion, the library checked if the + datatypes are subsets of one another in order to perform special conversion handling. + This check uses information that is only defined if a library conversion function is in use. + The library now skips this check for user-defined conversion functions. + + Fixes Github issue #3840 + + Java Library + ------------ + - + + + Configuration + ------------- + - Changed default of 'Error on HDF5 doxygen warnings' DOXYGEN_WARN_AS_ERROR option. + + The default setting of DOXYGEN_WARN_AS_ERROR to 'FAIL_ON_WARNINGS' has been changed + to 'NO'. It was decided that the setting was too aggressive and should be a user choice. + The github actions and scripts have been updated to reflect this. + + * HDF5_ENABLE_DOXY_WARNINGS: ON/OFF (Default: OFF) + * --enable-doxygen-errors: enable/disable (Default: disable) + + - Removed an Autotools configure hack that causes problems on MacOS + + A sed line in configure.ac was added in the past to paper over some + problems with older versions of the Autotools that would add incorrect + linker flags. This hack is not needed with recent versions of the + Autotools and the sed line errors on MacOS (though this was a silent + error that didn't break the build) so the hack has been removed. + + Fixes GitHub issue #3843 + + - Fixed an issue where the h5tools_test_utils test program was being + installed on the system for Autotools builds of HDF5 + + The h5tools_test_utils test program was mistakenly added to bin_PROGRAMS + in its Makefile.am configuration file, causing the executable to be + installed on the system. The executable is now added to noinst_PROGRAMS + instead and will no longer be installed on the system for Autotools builds + of HDF5. The CMake configuration code already avoids installing the + executable on the system. + + + Tools + ----- + - Renamed h5fuse.sh to h5fuse + + Addresses Discussion #3791 + + + Performance + ------------- + - + + + Fortran API + ----------- + - Fixed: HDF5 fails to compile with -Werror=lto-type-mismatch + + Removed the use of the offending C stub wrapper. + + Fixes GitHub issue #3987 + + + High-Level Library + ------------------ + - Fixed a memory leak in H5LTopen_file_image with H5LT_FILE_IMAGE_DONT_COPY flag + + When the H5LT_FILE_IMAGE_DONT_COPY flag is passed to H5LTopen_file_image, the + internally-allocated udata structure gets leaked as the core file driver doesn't + have a way to determine when or if it needs to call the "udata_free" callback. + This has been fixed by freeing the udata structure when the "image_free" callback + gets made during file close, where the file is holding the last reference to the + udata structure. + + Fixes GitHub issue #827 + + + Fortran High-Level APIs + ----------------------- + - + + + Documentation + ------------- + - + + + F90 APIs + -------- + - + + + C++ APIs + -------- + - + + + Testing + ------- + - Fixed a bug in the dt_arith test when H5_WANT_DCONV_EXCEPTION is not + defined + + The dt_arith test program's test_particular_fp_integer sub-test tries + to ensure that the library correctly raises a datatype conversion + exception when converting a floating-point value to an integer overflows. + However, this test would run even when H5_WANT_DCONV_EXCEPTION isn't + defined, causing the test to fail due to the library not raising + datatype conversion exceptions. 
This has now been fixed by not running + the test when H5_WANT_DCONV_EXCEPTION is not defined. + + - Fixed a testing failure in testphdf5 on Cray machines + + On some Cray machines, what appears to be a bug in Cray MPICH was causing + calls to H5Fis_accessible to create a 0-byte file with strange Unix + permissions. This was causing an H5Fdelete file deletion test in the + testphdf5 program to fail due to a just-deleted HDF5 file appearing to + still be accessible on the file system. The issue in Cray MPICH has been + worked around for the time being by resetting the MPI_Info object on the + File Access Property List used to MPI_INFO_NULL before passing it to the + H5Fis_accessible call. + + - A bug was fixed in the HDF5 API test random datatype generation code + + A bug in the random datatype generation code could cause test failures + when trying to generate an enumeration datatype that has duplicated + name/value pairs in it. This has now been fixed. + + - A bug was fixed in the HDF5 API test VOL connector registration checking code + + The HDF5 API test code checks to see if the VOL connector specified by the + HDF5_VOL_CONNECTOR environment variable (if any) is registered with the library + before attempting to run tests with it so that testing can be skipped and an + error can be returned when a VOL connector fails to register successfully. + Previously, this code didn't account for VOL connectors that specify extra + configuration information in the HDF5_VOL_CONNECTOR environment variable and + would incorrectly report that the specified VOL connector isn't registered + due to including the configuration information as part of the VOL connector + name being checked for registration status. This has now been fixed. + + - Fixed Fortran 2003 test with gfortran-v13, optimization levels O2,O3 + + Fixes failing Fortran 2003 test with gfortran, optimization level O2,O3 + with -fdefault-real-16. Fixes GH #2928. + + +Platforms Tested +=================== + + - HDF5 supports the latest macOS versions, including the current and two + preceding releases. As new major macOS versions become available, HDF5 + will discontinue support for the oldest version and add the latest + version to its list of compatible systems, along with the previous two + releases. 
+ + Linux 5.16.14-200.fc35 GNU gcc (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + Fedora35 clang version 13.0.0 (Fedora 13.0.0-3.fc35) + (cmake and autotools) + + Linux 5.19.0-1023-aws GNU gcc, gfortran, g++ + #24-Ubuntu SMP x86_64 GNU/Linux (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0 + Ubuntu 22.04 Ubuntu clang version 14.0.0-1ubuntu1 + Intel(R) oneAPI DPC++/C++ Compiler 2023.1.0 + ifort (IFORT) 2021.9.0 20230302 + (cmake and autotools) + + Linux 5.14.21-cray_shasta_c cray-mpich/8.1.23 + #1 SMP x86_64 GNU/Linux cce/15.0.0 + (frontier) gcc/12.2.0 + (cmake) + + Linux 5.11.0-34-generic GNU gcc (GCC) 9.4.0-1ubuntu1 + #36-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 9.4.0-1ubuntu1 + Ubuntu 20.04 Ubuntu clang version 10.0.0-4ubuntu1 + Intel(R) oneAPI DPC++/C++ Compiler 2023.1.0 + ifort (IFORT) 2021.9.0 20230302 + (cmake and autotools) + + Linux 4.14.0-115.35.1.1chaos aue/openmpi/4.1.4-arm-22.1.0.12 + #1 SMP aarch64 GNU/Linux Arm C/C++/Fortran Compiler version 22.1 + (stria) (based on LLVM 13.0.1) + (cmake) + + Linux 4.14.0-115.35.1.3chaos spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1 + (vortex) GCC 8.3.1 + XL 2021.09.22 + (cmake) + + Linux-4.14.0-115.21.2 spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1, 14.0.5 + (lassen) GCC 8.3.1 + XL 16.1.1.2, 2021.09.22, 2022.08.05 + (cmake) + + Linux-4.12.14-197.99-default cray-mpich/7.7.14 + #1 SMP x86_64 GNU/Linux cce 12.0.3 + (theta) GCC 11.2.0 + llvm 9.0 + Intel 19.1.2 + + Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + + Linux 3.10.0-1160.24.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4) + (jelly/kituo/moohan) Version 4.9.3, Version 7.2.0, Version 8.3.0, + Version 9.1.0, Version 10.2.0 + Intel(R) C (icc), C++ (icpc), Fortran (icc) + compilers: + Version 17.0.0.098 Build 20160721 + GNU C (gcc) and C++ (g++) 4.8.5 compilers + with NAG Fortran Compiler Release 7.1(Hanzomon) + Intel(R) C (icc) and C++ (icpc) 17.0.0.098 compilers + with NAG Fortran Compiler Release 7.1(Hanzomon) + MPICH 3.1.4 compiled with GCC 4.9.3 + MPICH 3.3 compiled with GCC 7.2.0 + OpenMPI 3.1.3 compiled with GCC 7.2.0 and 4.1.2 + compiled with GCC 9.1.0 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Versions 18.4.0 and 19.10-0 + NVIDIA nvc, nvfortran and nvc++ version 22.5-0 + (autotools and cmake) + + + Linux-3.10.0-1160.0.0.1chaos openmpi-4.1.2 + #1 SMP x86_64 GNU/Linux clang 6.0.0, 11.0.1 + (quartz) GCC 7.3.0, 8.1.0 + Intel 19.0.4, 2022.2, oneapi.2022.2 + + Linux-3.10.0-1160.90.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (skybridge) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.90.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (attaway) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.90.1.1chaos openmpi-intel/4.1 + #1 SMP x86_64 GNU/Linux Intel/19.1.2, 21.3.0 and 22.2.0 + (chama) (cmake) + + macOS Apple M1 11.6 Apple clang version 12.0.5 (clang-1205.0.22.11) + Darwin 20.6.0 arm64 gfortran GNU Fortran (Homebrew GCC 11.2.0) 11.1.0 + (macmini-m1) Intel icc/icpc/ifort version 2021.3.0 202106092021.3.0 20210609 + + macOS Big Sur 11.3.1 Apple clang version 12.0.5 (clang-1205.0.22.9) + Darwin 20.4.0 x86_64 gfortran GNU Fortran (Homebrew GCC 10.2.0_3) 10.2.0 + (bigsur-1) Intel icc/icpc/ifort 
version 2021.2.0 20210228 + + Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3 + 64-bit gfortran GNU Fortran (GCC) 5.2.0 + (osx1011test) Intel icc/icpc/ifort version 16.0.2 + + Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos6 Version 4.4.7 20120313 + (platypus) Version 4.9.3, 5.3.0, 6.2.0 + MPICH 3.1.4 compiled with GCC 4.9.3 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + + Windows 10 x64 Visual Studio 2019 w/ clang 12.0.0 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2019 w/ Intel (C/C++ only - cmake) + Visual Studio 2022 w/ clang 15.0.1 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2023 (cmake) + Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) + + +Known Problems +============== + + - When building with the NAG Fortran compiler using the Autotools and libtool + 2.4.2 or earlier, the -shared flag will be missing '-Wl,', which will cause + compilation to fail. This is due to a bug in libtool that was fixed in 2012 + and released in 2.4.4 in 2014. + + - When the library detects and builds in support for the _Float16 datatype, an + issue has been observed on at least one MacOS 14 system where the library + fails to initialize due to not being able to detect the byte order of the + _Float16 type (https://github.com/HDFGroup/hdf5/issues/4310): + + #5: H5Tinit_float.c line 308 in H5T__fix_order(): failed to detect byte order + major: Datatype + minor: Unable to initialize object + + If this issue is encountered, support for the _Float16 type can be disabled + with a configuration option: + + CMake: HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16=OFF + Autotools: --disable-nonstandard-feature-float16 + + - When HDF5 is compiled with NVHPC versions 23.5 - 23.9 (additional versions may + also be applicable) and with -O2 (or higher) and -DNDEBUG, test failures occur + in the following tests: + + H5PLUGIN-filter_plugin + H5TEST-flush2 + H5TEST-testhdf5-base + MPI_TEST_t_filters_parallel + + Sporadic failures (even with lower -O levels): + Java JUnit-TestH5Pfapl + Java JUnit-TestH5D + + Also, NVHPC will fail to compile the test/tselect.c test file with a compiler + error of 'use of undefined value' when the optimization level is -O2 or higher. + + This is confirmed to be a bug in the nvc compiler that has been fixed as of + 23.11. If you are using an affected version of the NVidia compiler, the + work-around is to set the optimization level to -O1. + + https://forums.developer.nvidia.com/t/hdf5-no-longer-compiles-with-nv-23-9/269045 + + - CMake files do not behave correctly with paths containing spaces. + Do not use spaces in paths because the required escaping for handling spaces + results in very complex and fragile build files. + + - At present, metadata cache images may not be generated by parallel + applications. Parallel applications can read files with metadata cache + images, but since this is a collective operation, a deadlock is possible + if one or more processes do not participate. + + - The subsetting option in ph5diff currently will fail and should be avoided. + The subsetting option works correctly in serial h5diff. + + - Flang Fortran compilation will fail (last check version 17) due to not yet + implemented: (1) derived type argument passed by value (H5VLff.F90), + and (2) support for REAL with KIND = 2 in intrinsic SPACING used in testing. 
+ + - Fortran tests HDF5_1_8.F90 and HDF5_F03.F90 will fail with Cray compilers + greater than version 16.0 due to a compiler bug. The latest version verified + as failing was version 17.0. + + - Several tests currently fail on certain platforms: + MPI_TEST-t_bigio fails with spectrum-mpi on ppc64le platforms. + + MPI_TEST-t_subfiling_vfd and MPI_TEST_EXAMPLES-ph5_subfiling fail with + cray-mpich on theta and with XL compilers on ppc64le platforms. + + MPI_TEST_testphdf5_tldsc fails with cray-mpich 7.7 on cori and theta. + + - File space may not be released when overwriting or deleting certain nested + variable length or reference types. + + - Known problems in previous releases can be found in the HISTORY*.txt files + in the HDF5 source. Please report any new problems found to + help@hdfgroup.org. + + +CMake vs. Autotools installations +================================= +While both build systems produce similar results, there are differences. +Each system produces the same set of folders on Linux (only CMake works +on standard Windows); bin, include, lib and share. Autotools places the +COPYING and RELEASE.txt file in the root folder, CMake places them in +the share folder. + +The bin folder contains the tools and the build scripts. Additionally, CMake +creates dynamic versions of the tools with the suffix "-shared". Autotools +installs one set of tools depending on the "--enable-shared" configuration +option. + build scripts + ------------- + Autotools: h5c++, h5cc, h5fc + CMake: h5c++, h5cc, h5hlc++, h5hlcc + +The include folder holds the header files and the fortran mod files. CMake +places the fortran mod files into separate shared and static subfolders, +while Autotools places one set of mod files into the include folder. Because +CMake produces a tools library, the header files for tools will appear in +the include folder. + +The lib folder contains the library files, and CMake adds the pkgconfig +subfolder with the hdf5*.pc files used by the bin/build scripts created by +the CMake build. CMake separates the C interface code from the fortran code by +creating C-stub libraries for each Fortran library. In addition, only CMake +installs the tools library. The names of the szip libraries are different +between the build systems. + +The share folder will have the most differences because CMake builds include +a number of CMake specific files for support of CMake's find_package and support +for the HDF5 Examples CMake project. + +The issues with the gif tool are: + HDFFV-10592 CVE-2018-17433 + HDFFV-10593 CVE-2018-17436 + HDFFV-11048 CVE-2020-10809 +These CVE issues have not yet been addressed and are avoided by not building +the gif tool by default. Enable building the High-Level tools with these options: + autotools: --enable-hlgiftools + cmake: HDF5_BUILD_HL_GIF_TOOLS=ON + + +%%%%1.14.3%%%% + +HDF5 version 1.14.3 released on 2023-10-27 +================================================================================ + + +INTRODUCTION +============ + +This document describes the differences between this release and the previous +HDF5 release. It contains information on the platforms tested and known +problems in this release. For more details check the HISTORY*.txt files in the +HDF5 source. + +Note that documentation in the links below will be updated at the time of each +final release. 
+ +Links to HDF5 documentation can be found on The HDF5 web page: + + https://portal.hdfgroup.org/display/HDF5/HDF5 + +The official HDF5 releases can be obtained from: + + https://www.hdfgroup.org/downloads/hdf5/ + +Changes from release to release and new features in the HDF5-1.14.x release series +can be found at: + + https://portal.hdfgroup.org/display/HDF5/Release+Specific+Information + +If you have any questions or comments, please send them to the HDF Help Desk: + + help@hdfgroup.org + + +CONTENTS +======== + +- New Features +- Support for new platforms and languages +- Bug Fixes since HDF5-1.14.2 +- Platforms Tested +- Known Problems +- CMake vs. Autotools installations + + +New Features +============ + + Configuration: + ------------- + - Improved support for Intel oneAPI + + * Separates the old 'classic' Intel compiler settings and warnings + from the oneAPI settings + * Uses `-check nouninit` in debug builds to avoid false positives + when building H5_buildiface with `-check all` + * Both Autotools and CMake + + - Added new options for CMake and Autotools to control the Doxygen + warnings as errors setting. + + * HDF5_ENABLE_DOXY_WARNINGS: ON/OFF (Default: ON) + * --enable-doxygen-errors: enable/disable (Default: enable) + + The default will fail to compile if the doxygen parsing generates warnings. + The option can be disabled for certain versions of doxygen with parsing + issues. i.e. 1.9.5, 1.9.8. + + Addresses GitHub issue #3398 + + - Added support for AOCC and classic Flang w/ the Autotools + + * Adds a config/clang-fflags options file to support Flang + * Corrects missing "-Wl," from linker options in the libtool wrappers + when using Flang, the MPI Fortran compiler wrappers, and building + the shared library. This would often result in unrecognized options + like -soname. + * Enable -nomp w/ Flang to avoid linking to the OpenMPI library. + + CMake can build the parallel, shared library w/ Fortran using AOCC + and Flang, so no changes were needed for that build system. + + Fixes GitHub issues #3439, #1588, #366, #280 + + - Converted the build of libaec and zlib to use FETCH_CONTENT with CMake. + + Using the CMake FetchContent module, the external filters can populate + content at configure time via any method supported by the ExternalProject + module. Whereas ExternalProject_Add() downloads at build time, the + FetchContent module makes content available immediately, allowing the + configure step to use the content in commands like add_subdirectory(), + include() or file() operations. + + Removed HDF options for using FETCH_CONTENT explicitly: + BUILD_SZIP_WITH_FETCHCONTENT:BOOL + BUILD_ZLIB_WITH_FETCHCONTENT:BOOL + + - Thread-safety + static library disabled on Windows w/ CMake + + The thread-safety feature requires hooks in DllMain(), which is only + present in the shared library. + + We previously just warned about this, but now any CMake configuration + that tries to build thread-safety and the static library will fail. + This cannot be overridden with ALLOW_UNSUPPORTED. + + Fixes GitHub issue #3613 + + - Autotools builds now build the szip filter by default when an appropriate + library is found + + Since libaec is prevalent and BSD-licensed for both encoding and + decoding, we build the szip filter by default now. + + Both autotools and CMake build systems will process the szip filter the same as + the zlib filter is processed. 
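+
+      Applications that want to know whether a particular installation ended
+      up with the szip filter (and its encoder) can check at run time. A
+      minimal sketch using the public filter-query API (shown here only for
+      illustration; error handling is omitted):
+
+          #include <stdio.h>
+          #include "hdf5.h"
+
+          int
+          main(void)
+          {
+              unsigned cfg = 0;
+
+              /* Is the szip (libaec) filter registered in this build? */
+              if (H5Zfilter_avail(H5Z_FILTER_SZIP) <= 0) {
+                  printf("szip filter not available\n");
+                  return 0;
+              }
+
+              /* Check whether encoding and decoding are both enabled */
+              if (H5Zget_filter_info(H5Z_FILTER_SZIP, &cfg) < 0)
+                  return 1;
+              printf("szip encode: %s, decode: %s\n",
+                     (cfg & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ? "yes" : "no",
+                     (cfg & H5Z_FILTER_CONFIG_DECODE_ENABLED) ? "yes" : "no");
+              return 0;
+          }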
+ + - Removed CMake cross-compiling variables + + * HDF5_USE_PREGEN + * HDF5_BATCH_H5DETECT + + These were used to work around H5detect and H5make_libsettings and + are no longer required. + + - Running H5make_libsettings is no longer required for cross-compiling + + The functionality of H5make_libsettings is now handled via template files, + so H5make_libsettings has been removed. + + - Running H5detect is no longer required for cross-compiling + + The functionality of H5detect is now exercised at library startup, + so H5detect has been removed. + + + Library: + -------- + - Added a simple cache to the read-only S3 (ros3) VFD + + The read-only S3 VFD now caches the first N bytes of a file stored + in S3 to avoid a lot of small I/O operations when opening files. + This cache is per-file and created when the file is opened. + + N is currently 16 MiB or the size of the file, whichever is smaller. + + Addresses GitHub issue #3381 + + - Added new API function H5Pget_actual_selection_io_mode() + + This function allows the user to determine if the library performed + selection I/O, vector I/O, or scalar (legacy) I/O during the last HDF5 + operation performed with the provided DXPL. + + + Parallel Library: + ----------------- + - Added optimized support for the parallel compression feature when + using the multi-dataset I/O API routines collectively + + Previously, calling H5Dwrite_multi/H5Dread_multi collectively in parallel + with a list containing one or more filtered datasets would cause HDF5 to + break out of the optimized multi-dataset I/O mode and instead perform I/O + by looping over each dataset in the I/O request. The library has now been + updated to perform I/O in a more optimized manner in this case by first + performing I/O on all the filtered datasets at once and then performing + I/O on all the unfiltered datasets at once. + + - Changed H5Pset_evict_on_close so that it can be called with a parallel + build of HDF5 + + Previously, H5Pset_evict_on_close would always fail when called from a + parallel build of HDF5, stating that the feature is not supported with + parallel HDF5. This failure would occur even if a parallel build of HDF5 + was used with a serial HDF5 application. H5Pset_evict_on_close can now + be called regardless of the library build type and the library will + instead fail during H5Fcreate/H5Fopen if the "evict on close" property + has been set to true and the file is being opened for parallel access + with more than 1 MPI process. + + + Fortran Library: + ---------------- + - Fixed an uninitialized error return value for hdferr + to return the error state of the h5aopen_by_idx_f API. + + - Added h5pget_vol_cap_flags_f and related Fortran VOL + capability definitions. + + - Fortran async APIs H5A, H5D, H5ES, H5G, H5F, H5L and H5O were added. + + - Added Fortran APIs: + h5pset_selection_io_f, h5pget_selection_io_f, + h5pget_actual_selection_io_mode_f, + h5pset_modify_write_buf_f, h5pget_modify_write_buf_f + + - Added Fortran APIs: + h5get_free_list_sizes_f, h5dwrite_chunk_f, h5dread_chunk_f, + h5fget_info_f, h5lvisit_f, h5lvisit_by_name_f, + h5pget_no_selection_io_cause_f, h5pget_mpio_no_collective_cause_f, + h5sselect_shape_same_f, h5sselect_intersect_block_f, + h5pget_file_space_page_size_f, h5pset_file_space_page_size_f, + h5pget_file_space_strategy_f, h5pset_file_space_strategy_f + + - Removed "-commons" linking option on Darwin, as COMMON and EQUIVALENCE + are no longer used in the Fortran source. 
+ + Fixes GitHub issue #3571 + + C++ Library: + ------------ + - + + + Java Library: + ------------- + - + + + Tools: + ------ + - + + + High-Level APIs: + ---------------- + - Added Fortran HL API: h5doappend_f + + + C Packet Table API: + ------------------- + - + + + Internal header file: + --------------------- + - + + + Documentation: + -------------- + - + + +Support for new platforms, languages and compilers +================================================== + - + + +Bug Fixes since HDF5-1.14.2 release +=================================== + Library + ------- + - Fixed some issues with chunk index metadata not getting read + collectively when collective metadata reads are enabled + + When looking up dataset chunks during I/O, the parallel library + temporarily disables collective metadata reads since it's generally + unlikely that the application will read the same chunks from all + MPI ranks. Leaving collective metadata reads enabled during + chunk lookups can lead to hangs or other bad behavior depending + on the chunk indexing structure used for the dataset in question. + However, due to the way that dataset chunk index metadata was + previously loaded in a deferred manner, this could mean that + the metadata for the main chunk index structure or its + accompanying pieces of metadata (e.g., fixed array data blocks) + could end up being read independently if these chunk lookup + operations are the first chunk index-related operation that + occurs on a dataset. This behavior is generally observed when + opening a dataset for which the metadata isn't in the metadata + cache yet and then immediately performing I/O on that dataset. + This behavior is not generally observed when creating a dataset + and then performing I/O on it, as the relevant metadata will + usually be in the metadata cache as a side effect of creating + the chunk index structures during dataset creation. + + This issue has been fixed by adding callbacks to the different + chunk indexing structure classes that allow more explicit control + over when chunk index metadata gets loaded. When collective + metadata reads are enabled, the necessary index metadata will now + get loaded collectively by all MPI ranks at the start of dataset + I/O to ensure that the ranks don't unintentionally read this + metadata independently further on. These changes fix collective + loading of the main chunk index structure, as well as v2 B-tree + root nodes, extensible array index blocks and fixed array data + blocks. There are still pieces of metadata that cannot currently + be loaded collectively, however, such as extensible array data + blocks, data block pages and super blocks, as well as fixed array + data block pages. These pieces of metadata are not necessarily + read in by all MPI ranks since this depends on which chunks the + ranks have selected in the dataset. Therefore, reading of these + pieces of metadata remains an independent operation. + + - Fixed potential hangs in parallel library during collective I/O with + independent metadata writes + + When performing collective parallel writes to a dataset where metadata + writes are requested as (or left as the default setting of) independent, + hangs could potentially occur during metadata cache sync points. This + was due to incorrect management of the internal state tracking whether + an I/O operation should be collective or not, causing the library to + attempt collective writes of metadata when they were meant to be + independent writes. 
During the metadata cache sync points, if the number + of cache entries being flushed was a multiple of the number of MPI ranks + in the MPI communicator used to access the HDF5 file, an equal amount of + collective MPI I/O calls were made and the dataset write call would be + successful. However, when the number of cache entries being flushed was + NOT a multiple of the number of MPI ranks, the ranks with more entries + than others would get stuck in an MPI_File_set_view call, while other + ranks would get stuck in a post-write MPI_Barrier call. This issue has + been fixed by correctly switching to independent I/O temporarily when + writing metadata independently during collective dataset I/O. + + - Fixed a bug with the way the Subfiling VFD assigns I/O concentrators + + During a file open operation, the Subfiling VFD determines the topology + of the application and uses that to select a subset of MPI ranks that + I/O will be forwarded to, called I/O concentrators. The code for this + had previously assumed that the parallel job launcher application (e.g., + mpirun, srun, etc.) would distribute MPI ranks sequentially to a node's + processors until all processors on that node have been assigned before + going on to the next node. When the launcher application mapped MPI ranks + to nodes in a different fashion, such as round-robin, this could cause + the Subfiling VFD to incorrectly map MPI ranks as I/O concentrators, + leading to missing subfiles. + + - Fixed a file space allocation bug in the parallel library for chunked + datasets + + With the addition of support for incremental file space allocation for + chunked datasets with filters applied to them that are created/accessed + in parallel, a bug was introduced to the library's parallel file space + allocation code. This could cause file space to not be allocated correctly + for datasets without filters applied to them that are created with serial + file access and later opened with parallel file access. In turn, this could + cause parallel writes to those datasets to place incorrect data in the file. + + - Fixed an assertion failure in Parallel HDF5 when a file can't be created + due to an invalid library version bounds setting + + An assertion failure could occur in H5MF_settle_raw_data_fsm when a file + can't be created with Parallel HDF5 due to specifying the use of a paged, + persistent file free space manager + (H5Pset_file_space_strategy(..., H5F_FSPACE_STRATEGY_PAGE, 1, ...)) with + an invalid library version bounds combination + (H5Pset_libver_bounds(..., H5F_LIBVER_EARLIEST, H5F_LIBVER_V18)). This + has now been fixed. + + - Fixed an assertion in a previous fix for CVE-2016-4332 + + An assert could fail when processing corrupt files that have invalid + shared message flags (as in CVE-2016-4332). + + The assert statement in question has been replaced with pointer checks + that don't raise errors. Since the function is in cleanup code, we do + our best to close and free things, even when presented with partially + initialized structs. + + Fixes CVE-2016-4332 and HDFFV-9950 (confirmed via the cve_hdf5 repo) + + - Fixed performance regression with some compound type conversions + + In-place type conversion was introduced for most use cases in 1.14.2. + While being able to use the read buffer for type conversion potentially + improves performance by performing the entire I/O at once, it also + disables the optimized compound type conversion used when the destination + is a subset of the source. 
Disabled in-place type conversion when using + this optimized conversion and there is no benefit in terms of the I/O + size. + + - Reading a H5std_string (std::string) via a C++ DataSet previously + truncated the string at the first null byte as if reading a C string. + Fixed length datasets are now read into H5std_string as a fixed length + string of the appropriate size. Variable length datasets will still be + truncated at the first null byte. + + Fixes Github issue #3034 + + - Fixed write buffer overflow in H5O__alloc_chunk + + The overflow was found by OSS-Fuzz https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=58658 + + Java Library + ------------ + - + + + Configuration + ------------- + - Fixes the ordering of INCLUDES when building with CMake + + Include directories in the source or build tree should come before other + directories to prioritize headers in the sources over installed ones. + + Fixes GitHub #1027 + + - The accum test now passes on macOS 12+ (Monterey) w/ CMake + + Due to changes in the way macOS handles LD_LIBRARY_PATH, the accum test + started failing on macOS 12+ when building with CMake. CMake has been + updated to set DYLD_LIBRARY_PATH on macOS and the test now passes. + + Fixes GitHub #2994, #2261, and #1289 + + - Changed the default settings used by CMake for the GZIP filter + + The default for the option HDF5_ENABLE_Z_LIB_SUPPORT was OFF. Now the default is ON. + This was done to match the defaults used by the autotools configure.ac. + In addition, the CMake message level for not finding a suitable filter library was + changed from FATAL_ERROR (which would halt the build process) to WARNING (which + will print a message to stderr). Associated files and documentation were changed to match. + + In addition, the default settings in the config/cmake/cacheinit.cmake file were changed to + allow CMake to disable building the filters if the tgz file could not be found. The option + to allow CMake to download the file from the original Github location requires setting + the ZLIB_USE_LOCALCONTENT option to OFF for gzip. And setting the LIBAEC_USE_LOCALCONTENT + option to OFF for libaec (szip). + + Fixes GitHub issue #2926 + + + Tools + ----- + - Fixed an issue with unmatched MPI messages in ph5diff + + The "manager" MPI rank in ph5diff was unintentionally sending "program end" + messages to its workers twice, leading to an error from MPICH similar to the + following: + + Abort(810645519) on node 1 (rank 1 in comm 0): Fatal error in internal_Finalize: Other MPI error, error stack: + internal_Finalize(50)...........: MPI_Finalize failed + MPII_Finalize(394)..............: + MPIR_Comm_delete_internal(1224).: Communicator (handle=44000000) being freed has 1 unmatched message(s) + MPIR_Comm_release_always(1250)..: + MPIR_finalize_builtin_comms(154): + + + Performance + ------------- + - + + + Fortran API + ----------- + - + + + High-Level Library + ------------------ + - + + + Fortran High-Level APIs + ----------------------- + - + + + Documentation + ------------- + - + + + F90 APIs + -------- + - + + + C++ APIs + -------- + - + + + Testing + ------- + - Disabled running of MPI Atomicity tests for OpenMPI major versions < 5 + + Support for MPI atomicity operations is not implemented for major + versions of OpenMPI less than version 5. This would cause the MPI + atomicity tests for parallel HDF5 to sporadically fail when run + with OpenMPI. Testphdf5 now checks if OpenMPI is being used and will + skip running the atomicity tests if the major version of OpenMPI is + < 5. 
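+
+      Applications that depend on MPI atomicity semantics can guard against
+      missing support in a similar way by checking the result of enabling
+      atomicity on an open file. A minimal, illustrative sketch (not taken
+      from the test code; assumes a parallel build and a file opened with
+      the MPIO VFD):
+
+          #include <mpi.h>
+          #include "hdf5.h"
+
+          /* Returns 1 if MPI atomic access could be enabled on file_id,
+           * 0 otherwise (e.g., the MPI implementation lacks support). */
+          static int
+          atomicity_available(hid_t file_id)
+          {
+              hbool_t flag = 0;
+
+              if (H5Fset_mpi_atomicity(file_id, 1) < 0)
+                  return 0;
+              if (H5Fget_mpi_atomicity(file_id, &flag) < 0)
+                  return 0;
+              return flag ? 1 : 0;
+          }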
+ + - Fixed Fortran 2003 test with gfortran-v13, optimization levels O2,O3 + + Fixes failing Fortran 2003 test with gfortran, optimization level O2,O3 + with -fdefault-real-16. Fixes GH #2928. + + +Platforms Tested +=================== + + Linux 5.19.0-1023-aws GNU gcc, gfortran, g++ + #24-Ubuntu SMP x86_64 GNU/Linux (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0 + Ubuntu 22.04 Ubuntu clang version 14.0.0-1ubuntu1 + Intel(R) oneAPI DPC++/C++ Compiler 2023.1.0 + ifort (IFORT) 2021.9.0 20230302 + (cmake and autotools) + + Linux 5.16.14-200.fc35 GNU gcc (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + Fedora35 clang version 13.0.0 (Fedora 13.0.0-3.fc35) + (cmake and autotools) + + Linux 5.14.21-cray_shasta_c cray-mpich/8.1.27 + #1 SMP x86_64 GNU/Linux cce/15.0.0 + (frontier) gcc/12.2.0 + (cmake) + + Linux 5.11.0-34-generic GNU gcc (GCC) 9.4.0-1ubuntu1 + #36-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 9.4.0-1ubuntu1 + Ubuntu 20.04 Ubuntu clang version 10.0.0-4ubuntu1 + Intel(R) oneAPI DPC++/C++ Compiler 2023.1.0 + ifort (IFORT) 2021.9.0 20230302 + (cmake and autotools) + + Linux 4.14.0-115.35.1.1chaos aue/openmpi/4.1.4-arm-22.1.0.12 + #1 SMP aarch64 GNU/Linux Arm C/C++/Fortran Compiler version 22.1 + (stria) (based on LLVM 13.0.1) + (cmake) + + Linux 4.14.0-115.35.1.3chaos spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1 + (vortex) GCC 8.3.1 + XL 2021.09.22 + (cmake) + + Linux-4.14.0-115.21.2 spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1, 14.0.5 + (lassen) GCC 8.3.1 + XL 16.1.1.2, 2021.09.22, 2022.08.05 + (cmake) + + Linux-4.12.14-197.99-default cray-mpich/7.7.14 + #1 SMP x86_64 GNU/Linux cce 12.0.3 + (theta) GCC 11.2.0 + llvm 9.0 + Intel 19.1.2 + + Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + + Linux 3.10.0-1160.24.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4) + (jelly/kituo/moohan) Version 4.9.3, Version 7.2.0, Version 8.3.0, + Version 9.1.0, Version 10.2.0 + Intel(R) C (icc), C++ (icpc), Fortran (icc) + compilers: + Version 17.0.0.098 Build 20160721 + GNU C (gcc) and C++ (g++) 4.8.5 compilers + with NAG Fortran Compiler Release 7.1(Hanzomon) + Intel(R) C (icc) and C++ (icpc) 17.0.0.098 compilers + with NAG Fortran Compiler Release 7.1(Hanzomon) + MPICH 3.1.4 compiled with GCC 4.9.3 + MPICH 3.3 compiled with GCC 7.2.0 + OpenMPI 3.1.3 compiled with GCC 7.2.0 and 4.1.2 + compiled with GCC 9.1.0 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Versions 18.4.0 and 19.10-0 + NVIDIA nvc, nvfortran and nvc++ version 22.5-0 + (autotools and cmake) + + + Linux-3.10.0-1160.0.0.1chaos openmpi-4.1.2 + #1 SMP x86_64 GNU/Linux clang 6.0.0, 11.0.1 + (quartz) GCC 7.3.0, 8.1.0 + Intel 19.0.4, 2022.2, oneapi.2022.2 + + Linux-3.10.0-1160.90.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (skybridge) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.90.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (attaway) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.90.1.1chaos openmpi-intel/4.1 + #1 SMP x86_64 GNU/Linux Intel/19.1.2, 21.3.0 and 22.2.0 + (chama) (cmake) + + macOS Apple M1 11.6 Apple clang version 12.0.5 (clang-1205.0.22.11) + Darwin 20.6.0 arm64 gfortran GNU Fortran (Homebrew GCC 11.2.0) 11.1.0 + (macmini-m1) Intel 
icc/icpc/ifort version 2021.3.0 202106092021.3.0 20210609 + + macOS Big Sur 11.3.1 Apple clang version 12.0.5 (clang-1205.0.22.9) + Darwin 20.4.0 x86_64 gfortran GNU Fortran (Homebrew GCC 10.2.0_3) 10.2.0 + (bigsur-1) Intel icc/icpc/ifort version 2021.2.0 20210228 + + Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3 + 64-bit gfortran GNU Fortran (GCC) 5.2.0 + (osx1011test) Intel icc/icpc/ifort version 16.0.2 + + Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos6 Version 4.4.7 20120313 + (platypus) Version 4.9.3, 5.3.0, 6.2.0 + MPICH 3.1.4 compiled with GCC 4.9.3 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + + Windows 10 x64 Visual Studio 2019 w/ clang 12.0.0 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2019 w/ Intel oneAPI 2023.2 C/C++ only - cmake) + Visual Studio 2022 w/ clang 16.0.5 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2022 w/ Intel oneAPI 2023.2 (C/C++ only - cmake) + Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) + + +Known Problems +============== + + Building HDF5 Fortran on Windows with Intel oneAPI 2023.2 currently fails for + this release with link errors. As a result, Windows binaries for this release + will not include Fortran. The problem will be addressed in HDF5 1.14.4. + + IEEE standard arithmetic enables software to raise exceptions such as overflow, + division by zero, and other illegal operations without interrupting or halting + the program flow. The HDF5 C library intentionally performs these exceptions. + Therefore, the "-ieee=full" nagfor switch is necessary when compiling a program + to avoid stopping on an exception. + + CMake files do not behave correctly with paths containing spaces. + Do not use spaces in paths because the required escaping for handling spaces + results in very complex and fragile build files. + ADB - 2019/05/07 + + At present, metadata cache images may not be generated by parallel + applications. Parallel applications can read files with metadata cache + images, but since this is a collective operation, a deadlock is possible + if one or more processes do not participate. + + CPP ptable test fails on both VS2017 and VS2019 with Intel compiler, JIRA + issue: HDFFV-10628. This test will pass with VS2015 with Intel compiler. + + The subsetting option in ph5diff currently will fail and should be avoided. + The subsetting option works correctly in serial h5diff. + + Several tests currently fail on certain platforms: + MPI_TEST-t_bigio fails with spectrum-mpi on ppc64le platforms. + + MPI_TEST-t_subfiling_vfd and MPI_TEST_EXAMPLES-ph5_subfiling fail with + cray-mpich on theta and with XL compilers on ppc64le platforms. + + MPI_TEST_testphdf5_tldsc fails with cray-mpich 7.7 on cori and theta. + + Known problems in previous releases can be found in the HISTORY*.txt files + in the HDF5 source. Please report any new problems found to + help@hdfgroup.org. + + +CMake vs. Autotools installations +================================= +While both build systems produce similar results, there are differences. +Each system produces the same set of folders on linux (only CMake works +on standard Windows); bin, include, lib and share. Autotools places the +COPYING and RELEASE.txt file in the root folder, CMake places them in +the share folder. + +The bin folder contains the tools and the build scripts. Additionally, CMake +creates dynamic versions of the tools with the suffix "-shared". 
Autotools +installs one set of tools depending on the "--enable-shared" configuration +option. + build scripts + ------------- + Autotools: h5c++, h5cc, h5fc + CMake: h5c++, h5cc, h5hlc++, h5hlcc + +The include folder holds the header files and the fortran mod files. CMake +places the fortran mod files into separate shared and static subfolders, +while Autotools places one set of mod files into the include folder. Because +CMake produces a tools library, the header files for tools will appear in +the include folder. + +The lib folder contains the library files, and CMake adds the pkgconfig +subfolder with the hdf5*.pc files used by the bin/build scripts created by +the CMake build. CMake separates the C interface code from the fortran code by +creating C-stub libraries for each Fortran library. In addition, only CMake +installs the tools library. The names of the szip libraries are different +between the build systems. + +The share folder will have the most differences because CMake builds include +a number of CMake specific files for support of CMake's find_package and support +for the HDF5 Examples CMake project. + +The issues with the gif tool are: + HDFFV-10592 CVE-2018-17433 + HDFFV-10593 CVE-2018-17436 + HDFFV-11048 CVE-2020-10809 +These CVE issues have not yet been addressed and are avoided by not building +the gif tool by default. Enable building the High-Level tools with these options: + autotools: --enable-hlgiftools + cmake: HDF5_BUILD_HL_GIF_TOOLS=ON + + +%%%%1.14.2%%%% + +HDF5 version 1.14.2 released on 2023-08-11 +================================================================================ + + +INTRODUCTION +============ + +This document describes the differences between this release and the previous +HDF5 release. It contains information on the platforms tested and known +problems in this release. For more details check the HISTORY*.txt files in the +HDF5 source. + +Note that documentation in the links below will be updated at the time of each +final release. + +Links to HDF5 documentation can be found on The HDF5 web page: + + https://portal.hdfgroup.org/display/HDF5/HDF5 + +The official HDF5 releases can be obtained from: + + https://www.hdfgroup.org/downloads/hdf5/ + +Changes from release to release and new features in the HDF5-1.14.x release series +can be found at: + + https://portal.hdfgroup.org/display/HDF5/Release+Specific+Information + +If you have any questions or comments, please send them to the HDF Help Desk: + + help@hdfgroup.org + + +CONTENTS +======== + +- New Features +- Support for new platforms and languages +- Bug Fixes since HDF5-1.14.1 +- Platforms Tested +- Known Problems +- CMake vs. Autotools installations + + +New Features +============ + + Configuration: + ------------- + - Updated HDF5 API tests CMake code to support VOL connectors + + * Implemented support for fetching, building and testing HDF5 + VOL connectors during the library build process and documented + the feature under doc/cmake-vols-fetchcontent.md + + * Implemented the HDF5_TEST_API_INSTALL option that enables + installation of the HDF5 API tests on the system + + + Library: + -------- + - Added support for in-place type conversion in most cases + + In-place type conversion allows the library to perform type conversion + without an intermediate type conversion buffer. This can improve + performance by allowing I/O in a single operation over the entire + selection instead of being limited by the size of the intermediate buffer. 
+ Implemented for I/O on contiguous and chunked datasets when the selection + is contiguous in memory and when the memory datatype is not smaller than + the file datatype. + + - Changed selection I/O to be on by default when using the MPIO file driver + + - Added support for selection I/O in the MPIO file driver + + Previously, only vector I/O operations were supported. Support for + selection I/O should improve performance and reduce memory uses in some + cases. + + - Change the error handling for a not found path in the find plugin process. + + While attempting to load a plugin the HDF5 library will fail if one of the + directories in the plugin paths does not exist, even if there are more paths + to check. Instead of exiting the function with an error, just log the error + and continue processing the list of paths to check. + + + Parallel Library: + ----------------- + - + + + Fortran Library: + ---------------- + - + + + C++ Library: + ------------ + - + + + Java Library: + ------------- + - + + + Tools: + ------ + - + + + High-Level APIs: + ---------------- + - + + + C Packet Table API: + ------------------- + - + + + Internal header file: + --------------------- + - + + + Documentation: + -------------- + - + + +Support for new platforms, languages and compilers +================================================== + - Linux 5.14.21-cray_shasta_c + #1 SMP x86_64 GNU/Linux + (frontier) + + +Bug Fixes since HDF5-1.14.1 release +=================================== + Library + ------- + - Fixed bugs in selection I/O + + Previously, the library could fail in some cases when performing selection + I/O with type conversion. + + - Fixed CVE-2018-13867 + + A corrupt file containing an invalid local heap datablock address + could trigger an assert failure when the metadata cache attempted + to load the datablock from storage. + + The local heap now verifies that the datablock address is valid + when the local heap header information is parsed. + + - Fixed CVE-2018-11202 + + A malformed file could result in chunk index memory leaks. Under most + conditions (i.e., when the --enable-using-memchecker option is NOT + used), this would result in a small memory leak and and infinite loop + and abort when shutting down the library. The infinite loop would be + due to the "free list" package not being able to clear its resources + so the library couldn't shut down. When the "using a memory checker" + option is used, the free lists are disabled so there is just a memory + leak with no abort on library shutdown. + + The chunk index resources are now correctly cleaned up when reading + misparsed files and valgrind confirms no memory leaks. + + - Fixed an issue where an assert statement was converted to an + incorrect error check statement + + An assert statement in the library dealing with undefined dataset data + fill values was converted to an improper error check that would always + trigger when a dataset's fill value was set to NULL (undefined). This + has now been fixed. + + - Fixed an assertion failure when attempting to use the Subfiling IOC + VFD directly + + The Subfiling feature makes use of two Virtual File Drivers, the + Subfiling VFD and the IOC (I/O Concentrator) VFD. The two VFDs are + intended to be stacked together such that the Subfiling VFD sits + "on top" of the IOC VFD and routes I/O requests through it; using the + IOC VFD alone is currently unsupported. The IOC VFD has been fixed so + that an error message is displayed in this situation rather than causing + an assertion failure. 
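+
+      For reference, the supported pattern is to select the Subfiling VFD
+      itself, which stacks the IOC VFD underneath it internally. A minimal
+      sketch of creating a file this way (illustrative only; assumes an
+      MPI-enabled build with the Subfiling VFD compiled in, and the file
+      name is arbitrary):
+
+          #include <mpi.h>
+          #include "hdf5.h"
+          #include "H5FDsubfiling.h"
+
+          int
+          main(int argc, char **argv)
+          {
+              hid_t fapl_id, file_id;
+
+              MPI_Init(&argc, &argv);
+
+              fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+              H5Pset_mpi_params(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+              /* NULL selects the default subfiling configuration; the IOC
+               * VFD is set up by the Subfiling VFD, not by the application */
+              H5Pset_fapl_subfiling(fapl_id, NULL);
+
+              file_id = H5Fcreate("subfiled.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+              H5Fclose(file_id);
+              H5Pclose(fapl_id);
+              MPI_Finalize();
+              return 0;
+          }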
+ + - Fixed a potential bug when copying empty enum datatypes + + Copying an empty enum datatype (including implicitly, as when an enum + is a part of a compound datatype) would fail in an assert in debug + mode and could fail in release mode depending on how the platform + handles undefined behavior regarding size 0 memory allocations and + using memcpy with a NULL src pointer. + + The library is now more careful about using memory operations when + copying empty enum datatypes and will not error or raise an assert. + + - Added an AAPL check to H5Acreate + + A check was added to H5Acreate to ensure that a failure is correctly + returned when an invalid Attribute Access Property List is passed + in to the function. The HDF5 API tests were failing for certain + build types due to this condition not being checked previously. + + + Java Library + ------------ + - Fixed switch case 'L' block missing a break statement. + + The HDF5Array.arrayify method is missing a break statement in the case 'L': section + which causes it to fall through and throw an HDF5JavaException when attempting to + read an Array[Array[Long]]. + + The error was fixed by inserting a break statement at the end of the case 'L': sections. + + Fixes GitHub issue #3056 + + + Configuration + ------------- + - Fixed a configuration issue that prevented building of the Subfiling VFD on macOS + + Checks were added to the CMake and Autotools code to verify that CLOCK_MONOTONIC_COARSE, + PTHREAD_MUTEX_ADAPTIVE_NP and pthread_condattr_setclock() are available before attempting + to use them in Subfiling VFD-related utility code. Without these checks, attempting + to build the Subfiling VFD on macOS would fail. + + + Tools + ----- + - Fixed an issue in h5repack for variable-length typed datasets + + When repacking datasets into a new file, h5repack tries to determines whether + it can use H5Ocopy to copy each dataset into the new file, or if it needs to + manually re-create the dataset, then read data from the old dataset and write + it to the new dataset. H5repack was previously using H5Ocopy for datasets with + variable-length datatypes, but this can be problematic if the global heap + addresses involved do not match exactly between the old and new files. These + addresses could change for a variety of reasons, such as the command-line options + provided to h5repack, how h5repack allocates space in the repacked file, etc. + Since H5Ocopy does not currently perform any translation when these addresses + change, datasets that were repacked with H5Ocopy could become unreadable in the + new file. H5repack has been fixed to repack variable-length typed datasets without + using H5Ocopy to ensure that the new datasets always have the correct global heap + addresses. + + + Performance + ------------- + - + + + Fortran API + ----------- + - + + High-Level Library + ------------------ + - + + + Fortran High-Level APIs + ----------------------- + - + + + Documentation + ------------- + - + + + F90 APIs + -------- + - + + + C++ APIs + -------- + - + + + Testing + ------- + - Fixed a testing failure in testphdf5 on Cray machines + + On some Cray machines, what appears to be a bug in Cray MPICH was causing + calls to H5Fis_accessible to create a 0-byte file with strange Unix + permissions. This was causing an H5Fdelete file deletion test in the + testphdf5 program to fail due to a just-deleted HDF5 file appearing to + still be accessible on the file system. 
The issue in Cray MPICH has been + worked around for the time being by resetting the MPI_Info object on the + File Access Property List used to MPI_INFO_NULL before passing it to the + H5Fis_accessible call. + + - A bug was fixed in the HDF5 API test random datatype generation code + + A bug in the random datatype generation code could cause test failures + when trying to generate an enumeration datatype that has duplicated + name/value pairs in it. This has now been fixed. + + - A bug was fixed in the HDF5 API test VOL connector registration checking code + + The HDF5 API test code checks to see if the VOL connector specified by the + HDF5_VOL_CONNECTOR environment variable (if any) is registered with the library + before attempting to run tests with it so that testing can be skipped and an + error can be returned when a VOL connector fails to register successfully. + Previously, this code didn't account for VOL connectors that specify extra + configuration information in the HDF5_VOL_CONNECTOR environment variable and + would incorrectly report that the specified VOL connector isn't registered + due to including the configuration information as part of the VOL connector + name being checked for registration status. This has now been fixed. + + +Platforms Tested +=================== + + Linux 5.19.0-1023-aws GNU gcc, gfortran, g++ + #24-Ubuntu SMP x86_64 GNU/Linux (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0 + Ubuntu 22.04 Ubuntu clang version 14.0.0-1ubuntu1 + Intel(R) oneAPI DPC++/C++ Compiler 2023.1.0 + ifort (IFORT) 2021.9.0 20230302 + (cmake and autotools) + + Linux 5.16.14-200.fc35 GNU gcc (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + Fedora35 clang version 13.0.0 (Fedora 13.0.0-3.fc35) + (cmake and autotools) + + Linux 5.14.21-cray_shasta_c cray-mpich/8.1.23 + #1 SMP x86_64 GNU/Linux cce/15.0.0 + (frontier) gcc/12.2.0 + (cmake) + + Linux 5.11.0-34-generic GNU gcc (GCC) 9.4.0-1ubuntu1 + #36-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 9.4.0-1ubuntu1 + Ubuntu 20.04 Ubuntu clang version 10.0.0-4ubuntu1 + Intel(R) oneAPI DPC++/C++ Compiler 2023.1.0 + ifort (IFORT) 2021.9.0 20230302 + (cmake and autotools) + + Linux 4.14.0-115.35.1.1chaos aue/openmpi/4.1.4-arm-22.1.0.12 + #1 SMP aarch64 GNU/Linux Arm C/C++/Fortran Compiler version 22.1 + (stria) (based on LLVM 13.0.1) + (cmake) + + Linux 4.14.0-115.35.1.3chaos spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1 + (vortex) GCC 8.3.1 + XL 2021.09.22 + (cmake) + + Linux-4.14.0-115.21.2 spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1, 14.0.5 + (lassen) GCC 8.3.1 + XL 16.1.1.2, 2021.09.22, 2022.08.05 + (cmake) + + Linux-4.12.14-197.99-default cray-mpich/7.7.14 + #1 SMP x86_64 GNU/Linux cce 12.0.3 + (theta) GCC 11.2.0 + llvm 9.0 + Intel 19.1.2 + + Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + + Linux 3.10.0-1160.24.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4) + (jelly/kituo/moohan) Version 4.9.3, Version 7.2.0, Version 8.3.0, + Version 9.1.0, Version 10.2.0 + Intel(R) C (icc), C++ (icpc), Fortran (icc) + compilers: + Version 17.0.0.098 Build 20160721 + GNU C (gcc) and C++ (g++) 4.8.5 compilers + with NAG Fortran Compiler Release 7.1(Hanzomon) + Intel(R) C (icc) and C++ 
(icpc) 17.0.0.098 compilers + with NAG Fortran Compiler Release 7.1(Hanzomon) + MPICH 3.1.4 compiled with GCC 4.9.3 + MPICH 3.3 compiled with GCC 7.2.0 + OpenMPI 3.1.3 compiled with GCC 7.2.0 and 4.1.2 + compiled with GCC 9.1.0 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Versions 18.4.0 and 19.10-0 + NVIDIA nvc, nvfortran and nvc++ version 22.5-0 + (autotools and cmake) + + + Linux-3.10.0-1160.0.0.1chaos openmpi-4.1.2 + #1 SMP x86_64 GNU/Linux clang 6.0.0, 11.0.1 + (quartz) GCC 7.3.0, 8.1.0 + Intel 19.0.4, 2022.2, oneapi.2022.2 + + Linux-3.10.0-1160.90.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (skybridge) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.90.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (attaway) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.90.1.1chaos openmpi-intel/4.1 + #1 SMP x86_64 GNU/Linux Intel/19.1.2, 21.3.0 and 22.2.0 + (chama) (cmake) + + macOS Apple M1 11.6 Apple clang version 12.0.5 (clang-1205.0.22.11) + Darwin 20.6.0 arm64 gfortran GNU Fortran (Homebrew GCC 11.2.0) 11.1.0 + (macmini-m1) Intel icc/icpc/ifort version 2021.3.0 202106092021.3.0 20210609 + + macOS Big Sur 11.3.1 Apple clang version 12.0.5 (clang-1205.0.22.9) + Darwin 20.4.0 x86_64 gfortran GNU Fortran (Homebrew GCC 10.2.0_3) 10.2.0 + (bigsur-1) Intel icc/icpc/ifort version 2021.2.0 20210228 + + Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3 + 64-bit gfortran GNU Fortran (GCC) 5.2.0 + (osx1011test) Intel icc/icpc/ifort version 16.0.2 + + + Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos6 Version 4.4.7 20120313 + (platypus) Version 4.9.3, 5.3.0, 6.2.0 + MPICH 3.1.4 compiled with GCC 4.9.3 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + + Windows 10 x64 Visual Studio 2019 w/ clang 12.0.0 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2019 w/ Intel C/C++ only cmake) + Visual Studio 2022 w/ clang 15.0.1 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2023 (cmake) + Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) + + +Known Problems +============== + + CMake files do not behave correctly with paths containing spaces. + Do not use spaces in paths because the required escaping for handling spaces + results in very complex and fragile build files. + ADB - 2019/05/07 + + At present, metadata cache images may not be generated by parallel + applications. Parallel applications can read files with metadata cache + images, but since this is a collective operation, a deadlock is possible + if one or more processes do not participate. + + CPP ptable test fails on both VS2017 and VS2019 with Intel compiler, JIRA + issue: HDFFV-10628. This test will pass with VS2015 with Intel compiler. + + The subsetting option in ph5diff currently will fail and should be avoided. + The subsetting option works correctly in serial h5diff. + + Several tests currently fail on certain platforms: + MPI_TEST-t_bigio fails with spectrum-mpi on ppc64le platforms. + + MPI_TEST-t_subfiling_vfd and MPI_TEST_EXAMPLES-ph5_subfiling fail with + cray-mpich on theta and with XL compilers on ppc64le platforms. + + MPI_TEST_testphdf5_tldsc fails with cray-mpich 7.7 on theta. + + Known problems in previous releases can be found in the HISTORY*.txt files + in the HDF5 source. Please report any new problems found to + help@hdfgroup.org. + + +CMake vs. 
Autotools installations +================================= +While both build systems produce similar results, there are differences. +Each system produces the same set of folders on linux (only CMake works +on standard Windows); bin, include, lib and share. Autotools places the +COPYING and RELEASE.txt file in the root folder, CMake places them in +the share folder. + +The bin folder contains the tools and the build scripts. Additionally, CMake +creates dynamic versions of the tools with the suffix "-shared". Autotools +installs one set of tools depending on the "--enable-shared" configuration +option. + build scripts + ------------- + Autotools: h5c++, h5cc, h5fc + CMake: h5c++, h5cc, h5hlc++, h5hlcc + +The include folder holds the header files and the fortran mod files. CMake +places the fortran mod files into separate shared and static subfolders, +while Autotools places one set of mod files into the include folder. Because +CMake produces a tools library, the header files for tools will appear in +the include folder. + +The lib folder contains the library files, and CMake adds the pkgconfig +subfolder with the hdf5*.pc files used by the bin/build scripts created by +the CMake build. CMake separates the C interface code from the fortran code by +creating C-stub libraries for each Fortran library. In addition, only CMake +installs the tools library. The names of the szip libraries are different +between the build systems. + +The share folder will have the most differences because CMake builds include +a number of CMake specific files for support of CMake's find_package and support +for the HDF5 Examples CMake project. + +The issues with the gif tool are: + HDFFV-10592 CVE-2018-17433 + HDFFV-10593 CVE-2018-17436 + HDFFV-11048 CVE-2020-10809 +These CVE issues have not yet been addressed and are avoided by not building +the gif tool by default. Enable building the High-Level tools with these options: + autotools: --enable-hlgiftools + cmake: HDF5_BUILD_HL_GIF_TOOLS=ON + + +%%%%1.14.1%%%% + +HDF5 version 1.14.1-2 released on 2023-05-11 +================================================================================ +HDF5 1.14.1-2 is a patch release for HDF5 1.14.1. The only change in the patch +release is that Autoconf 2.71 was used to generate the Autotools build files, +which allows building with Intel's oneAPI. + + +INTRODUCTION +============ + +This document describes the differences between this release and the previous +HDF5 release. It contains information on the platforms tested and known +problems in this release. For more details check the HISTORY*.txt files in the +HDF5 source. + +Note that documentation in the links below will be updated at the time of each +final release. + +Links to HDF5 documentation can be found on The HDF5 web page: + + https://portal.hdfgroup.org/display/HDF5/HDF5 + +The official HDF5 releases can be obtained from: + + https://www.hdfgroup.org/downloads/hdf5/ + +Changes from release to release and new features in the HDF5-1.14.x release series +can be found at: + + https://portal.hdfgroup.org/display/HDF5/Release+Specific+Information + +If you have any questions or comments, please send them to the HDF Help Desk: + + help@hdfgroup.org + + +CONTENTS +======== + +- New Features +- Support for new platforms and languages +- Bug Fixes since HDF5-1.14.0 +- Platforms Tested +- Known Problems +- CMake vs. 
Autotools installations + + +New Features +============ + + Configuration: + ------------- + - Added new CMake options for building and running HDF5 API tests + (Experimental) + + HDF5 API tests are an experimental feature, primarily targeted + toward HDF5 VOL connector authors, that is currently being developed. + These tests exercise the HDF5 API and are being integrated back + into the HDF5 library from the HDF5 VOL tests repository + (https://github.com/HDFGroup/vol-tests). To support this feature, + the following new options have been added to CMake: + + * HDF5_TEST_API: ON/OFF (Default: OFF) + + Controls whether the HDF5 API tests will be built. These tests + will only be run during testing of HDF5 if the HDF5_TEST_SERIAL + (for serial tests) and HDF5_TEST_PARALLEL (for parallel tests) + options are enabled. + + * HDF5_TEST_API_INSTALL: ON/OFF (Default: OFF) + + Controls whether the HDF5 API test executables will be installed + on the system alongside the HDF5 library. This option is currently + not functional. + + * HDF5_TEST_API_ENABLE_ASYNC: ON/OFF (Default: OFF) + + Controls whether the HDF5 Async API tests will be built. These + tests will only be run if the VOL connector used supports Async + operations. + + * HDF5_TEST_API_ENABLE_DRIVER: ON/OFF (Default: OFF) + + Controls whether to build the HDF5 API test driver program. This + test driver program is useful for VOL connectors that use a + client/server model where the server needs to be up and running + before the VOL connector can function. This option is currently + not functional. + + * HDF5_TEST_API_SERVER: String (Default: "") + + Used to specify a path to the server executable that the test + driver program should execute. + + - Added support for CMake presets file. + + CMake supports two main files, CMakePresets.json and CMakeUserPresets.json, + that allow users to specify common configure options and share them with others. + HDF added a CMakePresets.json file of a typical configuration and support + file, config/cmake-presets/hidden-presets.json. + Also added a section to INSTALL_CMake.txt with very basic explanation of the + process to use CMakePresets. + + - Deprecated and removed old SZIP library in favor of LIBAEC library + + LIBAEC library has been used in HDF5 binaries as the szip library of choice + for a few years. We are removing the options for using the old SZIP library. + + Also removed the config/cmake/FindSZIP.cmake file. + + - Enabled instrumentation of the library by default in CMake for parallel + debug builds + + HDF5 can be configured to instrument portions of the parallel library to + aid in debugging. Autotools builds of HDF5 turn this capability on by + default for parallel debug builds and off by default for other build types. + CMake has been updated to match this behavior. + + - Added new option to build libaec and zlib inline with CMake. + + Using the CMake FetchContent module, the external filters can populate + content at configure time via any method supported by the ExternalProject + module. Whereas ExternalProject_Add() downloads at build time, the + FetchContent module makes content available immediately, allowing the + configure step to use the content in commands like add_subdirectory(), + include() or file() operations. 
+ + The HDF options (and defaults) for using this are: + BUILD_SZIP_WITH_FETCHCONTENT:BOOL=OFF + LIBAEC_USE_LOCALCONTENT:BOOL=OFF + BUILD_ZLIB_WITH_FETCHCONTENT:BOOL=OFF + ZLIB_USE_LOCALCONTENT:BOOL=OFF + + The CMake variables to control the path and file names: + LIBAEC_TGZ_ORIGPATH:STRING + LIBAEC_TGZ_ORIGNAME:STRING + ZLIB_TGZ_ORIGPATH:STRING + ZLIB_TGZ_ORIGNAME:STRING + + See the CMakeFilters.cmake and config/cmake/cacheinit.cmake files for usage. + + + Library: + -------- + - Added a Subfiling VFD configuration file prefix environment variable + + The Subfiling VFD now checks for values set in a new environment + variable "H5FD_SUBFILING_CONFIG_FILE_PREFIX" to determine if the + application has specified a pathname prefix to apply to the file + path for its configuration file. For example, this can be useful + for cases where the application wishes to write subfiles to a + machine's node-local storage while placing the subfiling configuration + file on a file system readable by all machine nodes. + + - Added H5Pset_selection_io(), H5Pget_selection_io(), and + H5Pget_no_selection_io_cause() API functions to manage the selection I/O + feature. This can be used to enable collective I/O with type conversion, + or it can be used with custom VFDs that support vector or selection I/O. + + - Added H5Pset_modify_write_buf() and H5Pget_modify_write_buf() API + functions to allow the library to modify the contents of write buffers, in + order to avoid malloc/memcpy. Currently only used for type conversion + with selection I/O. + + + Parallel Library: + ----------------- + - + + + Fortran Library: + ---------------- + - Fortran async APIs H5A, H5D, H5ES, H5G, H5F, H5L and H5O were added. + + - Added Fortran APIs: + h5pset_selection_io_f, h5pget_selection_io_f + h5pset_modify_write_buf_f, h5pget_modify_write_buf_f + + C++ Library: + ------------ + - + + + Java Library: + ------------- + - + + + Tools: + ------ + - + + + High-Level APIs: + ---------------- + - + + + C Packet Table API: + ------------------- + - + + + Internal header file: + --------------------- + - + + + Documentation: + -------------- + - Ported the existing VOL Connector Author Guide document to doxygen. + + Added new dox file, VOLConnGuide.dox. + + +Support for new platforms, languages and compilers +================================================== + - + + +Bug Fixes since HDF5-1.14.0 release +=================================== + Library + ------- + - Fixed a bug in H5Ocopy that could generate invalid HDF5 files + + H5Ocopy was missing a check to determine whether the new object's + object header version is greater than version 1. Without this check, + copying of objects with object headers that are smaller than a + certain size would cause H5Ocopy to create an object header for the + new object that has a gap in the header data. According to the + HDF5 File Format Specification, this is not allowed for version + 1 of the object header format. + + Fixes GitHub issue #2653 + + - Fixed H5Pget_vol_cap_flags and H5Pget_vol_id to accept H5P_DEFAULT + + H5Pget_vol_cap_flags and H5Pget_vol_id were updated to correctly + accept H5P_DEFAULT for the 'plist_id' FAPL parameter. Previously, + they would fail if provided with H5P_DEFAULT as the FAPL. 
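+
+      A minimal sketch of the call pattern that now works (illustrative
+      only; error handling trimmed):
+
+          #include <stdint.h>
+          #include <stdio.h>
+          #include "hdf5.h"
+
+          int
+          main(void)
+          {
+              uint64_t cap_flags = 0;
+              hid_t    vol_id    = H5I_INVALID_HID;
+
+              /* Both calls now accept H5P_DEFAULT for the FAPL parameter */
+              if (H5Pget_vol_cap_flags(H5P_DEFAULT, &cap_flags) < 0)
+                  return 1;
+              if (H5Pget_vol_id(H5P_DEFAULT, &vol_id) < 0)
+                  return 1;
+
+              printf("async capable: %s\n",
+                     (cap_flags & H5VL_CAP_FLAG_ASYNC) ? "yes" : "no");
+
+              H5VLclose(vol_id); /* caller must release the returned VOL ID */
+              return 0;
+          }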
+ + - Fixed ROS3 VFD anonymous credential usage with h5dump and h5ls + + ROS3 VFD anonymous credential functionality became broken in h5dump + and h5ls in the HDF5 1.14.0 release with the added support for VFD + plugins, which changed the way that the tools handled setting of + credential information that the VFD uses. The tools could be + provided the command-line option of "--s3-cred=(,,)" as a workaround + for anonymous credential usage, but the documentation for this + option stated that anonymous credentials could be used by simply + omitting the option. The latter functionality has been restored. + + Fixes GitHub issue #2406 + + - Fixed memory leaks when processing malformed object header continuation messages + + Malformed object header continuation messages can result in a too-small + buffer being passed to the decode function, which could lead to reading + past the end of the buffer. Additionally, errors in processing these + malformed messages can lead to allocated memory not being cleaned up. + + This fix adds bounds checking and cleanup code to the object header + continuation message processing. + + Fixes GitHub issue #2604 + + - Fixed memory leaks, aborts, and overflows in H5O EFL decode + + The external file list code could call assert(), read past buffer + boundaries, and not properly clean up resources when parsing malformed + external data files messages. + + This fix cleans up allocated memory, adds buffer bounds checks, and + converts asserts to HDF5 error checking. + + Fixes GitHub issue #2605 + + - Fixed potential heap buffer overflow in decoding of link info message + + Detections of buffer overflow were added for decoding version, index + flags, link creation order value, and the next three addresses. The + checkings will remove the potential invalid read of any of these + values that could be triggered by a malformed file. + + Fixes GitHub issue #2603 + + - Memory leak + + Memory leak was detected when running h5dump with "pov". The memory was allocated + via H5FL__malloc() in hdf5/src/H5FL.c + + The fuzzed file "pov" was an HDF5 file containing an illegal continuation message. + When deserializing the object header chunks for the file, memory is allocated for the + array of continuation messages (cont_msg_info->msgs) in continuation message info struct. + As error is encountered in loading the illegal message, the memory allocated for + cont_msg_info->msgs needs to be freed. + + Fixes GitHub issue #2599 + + - Fixed memory leaks that could occur when reading a dataset from a + malformed file + + When attempting to read layout, pline, and efl information for a + dataset, memory leaks could occur if attempting to read pline/efl + information threw an error, which is due to the memory that was + allocated for pline and efl not being properly cleaned up on error. + + Fixes GitHub issue #2602 + + - Fixed potential heap buffer overrun in group info header decoding from malformed file + + H5O__ginfo_decode could sometimes read past allocated memory when parsing a + group info message from the header of a malformed file. + + It now checks buffer size before each read to properly throw an error in these cases. + + Fixes GitHub issue #2601 + + - Fixed potential buffer overrun issues in some object header decode routines + + Several checks were added to H5O__layout_decode and H5O__sdspace_decode to + ensure that memory buffers don't get overrun when decoding buffers read from + a (possibly corrupted) HDF5 file. 
+ + - Fixed issues in the Subfiling VFD when using the SELECT_IOC_EVERY_NTH_RANK + or SELECT_IOC_TOTAL I/O concentrator selection strategies + + Multiple bugs involving these I/O concentrator selection strategies + were fixed, including: + + * A bug that caused the selection strategy to be altered when + criteria for the strategy were specified in the + H5FD_SUBFILING_IOC_SELECTION_CRITERIA environment variable as + a single value, rather than in the old and undocumented + 'integer:integer' format + * Two bugs which caused a request for 'N' I/O concentrators to + result in 'N - 1' I/O concentrators being assigned, which also + led to issues if only 1 I/O concentrator was requested + + Also added a regression test for these two I/O concentrator selection + strategies to prevent future issues. + + - Fixed a heap buffer overflow that occurs when reading from + a dataset with a compact layout within a malformed HDF5 file + + During opening of a dataset that has a compact layout, the + library allocates a buffer that stores the dataset's raw data. + The dataset's object header that gets written to the file + contains information about how large of a buffer the library + should allocate. If this object header is malformed such that + it causes the library to allocate a buffer that is too small + to hold the dataset's raw data, future I/O to the dataset can + result in heap buffer overflows. To fix this issue, an extra + check is now performed for compact datasets to ensure that + the size of the allocated buffer matches the expected size + of the dataset's raw data (as calculated from the dataset's + dataspace and datatype information). If the two sizes do not + match, opening of the dataset will fail. + + Fixes GitHub issue #2606 + + - Fixed a memory corruption issue that can occur when reading + from a dataset using a hyperslab selection in the file + dataspace and a point selection in the memory dataspace + + When reading from a dataset using a hyperslab selection in + the dataset's file dataspace and a point selection in the + dataset's memory dataspace where the file dataspace's "rank" + is greater than the memory dataspace's "rank", memory corruption + could occur due to an incorrect number of selection points + being copied when projecting the point selection onto the + hyperslab selection's dataspace. + + - Fixed an issue with collective metadata writes of global heap data + + New test failures in parallel netCDF started occurring with debug + builds of HDF5 due to an assertion failure; this was reported in + GitHub issue #2433. The assertion failure began happening after the + collective metadata write pathway in the library was updated to use + vector I/O so that parallel-enabled HDF5 Virtual File Drivers (other + than the existing MPI I/O VFD) can support collective metadata writes. + + The assertion failure was fixed by updating collective metadata writes + to treat global heap metadata as raw data, as done elsewhere in the + library. + + Fixes GitHub issue #2433 + + - Fix CVE-2021-37501 / GHSA-rfgw-5vq3-wrjf + + Check for overflow when calculating on-disk attribute data size. + + A bogus HDF5 file may contain dataspace messages with sizes + that cause the on-disk data size to exceed what is addressable. + When calculating the size, make sure the multiplication does not + overflow. + The test case was crafted in a way that the overflow caused the + size to be 0. + + Fixes GitHub issue #2458 + + - Fixed buffer overflow error in image decoding function.
+ + The error occurred in the function that decodes an address from the specified + buffer, which is called many times from the function responsible for image + decoding. The length of the buffer is known in the image decoding function, + but no checks were performed, so buffer overflows could occur in many places, + including callee functions for address decoding. + + The error was fixed by inserting the corresponding buffer overflow checks. + + Fixes GitHub issue #2432 + + + Java Library + ------------ + - + + + Configuration + ------------- + - Fixed syntax of generator expressions used by CMake + + Adding quotes around a generator expression allows CMake to + parse the expression correctly. Generator expressions are typically + parsed after command arguments. If a generator expression contains + spaces, new lines, semicolons or other characters that may be + interpreted as command argument separators, the whole expression + should be surrounded by quotes when passed to a command. Failure to + do so may result in the expression being split and it may no longer + be recognized as a generator expression. + + Fixes GitHub issue #2906 + + - Fixed improper include of Subfiling VFD build directory + + With the release of the Subfiling Virtual File Driver feature, compiler + flags were added to the Autotools build's CPPFLAGS and AM_CPPFLAGS + variables to always include the Subfiling VFD source code directory, + regardless of whether the VFD is enabled and built or not. These flags + are needed because the header files for the VFD contain macros that are + assumed to always be available, such as H5FD_SUBFILING_NAME, so the + header files are unconditionally included in the HDF5 library. However, + these flags are only needed when building HDF5, so they belong in the + H5_CPPFLAGS variable instead. Inclusion in the CPPFLAGS and AM_CPPFLAGS + variables would export these flags to the h5cc and h5c++ wrapper scripts, + as well as the libhdf5.settings file, which would break builds of software + that use HDF5 and try to use or parse information out of these files after + deleting temporary HDF5 build directories. + + Fixes GitHub issues #2422 and #2621 + + - Correct the CMake generated pkg-config file + + The pkg-config file generated by CMake had the order and placement of the + libraries wrong. Also added support for debug library names. + + Changed the order of Libs.private libraries so that dependencies come after + dependents. Did not move the compression libraries into Requires.private + because there was not a way to determine if the compression libraries had + supported pkg-config files. We still recommend that the CMake config file method + be used when building projects with CMake. + + Fixes GitHub issues #1546 and #2259 + + - Force lowercase Fortran module file names + + The Cray Fortran compiler uses uppercase Fortran module file names, which + caused CMake installs to fail. A compiler option was added to use lowercase + instead. + + + Tools + ----- + - Names of objects containing square brackets cannot be handled without the + special argument, --no-compact-subset, on the h5dump command line. + + h5diff did not have this option; it has now been added. + + Fixes GitHub issue #2682 + + - In the tools traverse function, an error in either visit call + would bypass the cleanup of the local data variables. + + Replaced H5TOOLS_GOTO_ERROR with H5TOOLS_ERROR so that cleanup still runs.
+ + Fixes GitHub issue #2598 + + + Performance + ------------- + - + + + Fortran API + ----------- + - + + High-Level Library + ------------------ + - + + + Fortran High-Level APIs + ----------------------- + - + + + Documentation + ------------- + - + + + F90 APIs + -------- + - + + + C++ APIs + -------- + - + + + Testing + ------- + - + + +Platforms Tested +=================== + + Linux 5.16.14-200.fc35 GNU gcc (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + Fedora35 clang version 13.0.0 (Fedora 13.0.0-3.fc35) + (cmake and autotools) + + Linux 5.11.0-34-generic GNU gcc (GCC) 9.3.0-17ubuntu1 + #36-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 9.3.0-17ubuntu1 + Ubuntu 20.04 Ubuntu clang version 10.0.0-4 + (cmake and autotools) + + Linux 5.3.18-150300-cray_shasta_c cray-mpich/8.1.16 + #1 SMP x86_64 GNU/Linux Cray clang 14.0.0 + (crusher) GCC 11.2.0 + (cmake) + + Linux 4.14.0-115.35.1.1chaos openmpi 4.0.5 + #1 SMP aarch64 GNU/Linux GCC 9.2.0 (ARM-build-5) + (stria) GCC 7.2.0 (Spack GCC) + (cmake) + + Linux 4.14.0-115.35.1.3chaos spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1 + (vortex) GCC 8.3.1 + XL 16.1.1 + (cmake) + + Linux-4.14.0-115.21.2 spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1, 14.0.5 + (lassen) GCC 8.3.1 + XL 16.1.1.2, 2021,09.22, 2022.08.05 + (cmake) + + Linux-4.12.14-197.99-default cray-mpich/7.7.14 + #1 SMP x86_64 GNU/Linux cce 12.0.3 + (theta) GCC 11.2.0 + llvm 9.0 + Intel 19.1.2 + + Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + + Linux 3.10.0-1160.24.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4) + (jelly/kituo/moohan) Version 4.9.3, Version 5.3.0, Version 6.3.0, + Version 7.2.0, Version 8.3.0, Version 9.1.0 + Intel(R) C (icc), C++ (icpc), Fortran (icc) + compilers: + Version 17.0.0.098 Build 20160721 + GNU C (gcc) and C++ (g++) 4.8.5 compilers + with NAG Fortran Compiler Release 6.1(Tozai) + Intel(R) C (icc) and C++ (icpc) 17.0.0.098 compilers + with NAG Fortran Compiler Release 6.1(Tozai) + MPICH 3.1.4 compiled with GCC 4.9.3 + MPICH 3.3 compiled with GCC 7.2.0 + OpenMPI 2.1.6 compiled with icc 18.0.1 + OpenMPI 3.1.3 and 4.0.0 compiled with GCC 7.2.0 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + (autotools and cmake) + + Linux-3.10.0-1160.0.0.1chaos openmpi-4.1.2 + #1 SMP x86_64 GNU/Linux clang 6.0.0, 11.0.1 + (quartz) GCC 7.3.0, 8.1.0 + Intel 19.0.4, 2022.2, oneapi.2022.2 + + Linux-3.10.0-1160.71.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (skybridge) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.66.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (attaway) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.59.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux Intel/19.1 + (chama) (cmake) + + macOS Apple M1 11.6 Apple clang version 12.0.5 (clang-1205.0.22.11) + Darwin 20.6.0 arm64 gfortran GNU Fortran (Homebrew GCC 11.2.0) 11.1.0 + (macmini-m1) Intel icc/icpc/ifort version 2021.3.0 202106092021.3.0 20210609 + + macOS Big Sur 11.3.1 Apple clang version 12.0.5 (clang-1205.0.22.9) + Darwin 20.4.0 x86_64 gfortran GNU Fortran (Homebrew GCC 10.2.0_3) 10.2.0 + (bigsur-1) Intel icc/icpc/ifort version 2021.2.0 20210228 + + macOS High Sierra 10.13.6 Apple LLVM version 
10.0.0 (clang-1000.10.44.4) + 64-bit gfortran GNU Fortran (GCC) 6.3.0 + (bear) Intel icc/icpc/ifort version 19.0.4.233 20190416 + + macOS Sierra 10.12.6 Apple LLVM version 9.0.0 (clang-900.39.2) + 64-bit gfortran GNU Fortran (GCC) 7.4.0 + (kite) Intel icc/icpc/ifort version 17.0.2 + + Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3 + 64-bit gfortran GNU Fortran (GCC) 5.2.0 + (osx1011test) Intel icc/icpc/ifort version 16.0.2 + + + Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos6 Version 4.4.7 20120313 + (platypus) Version 4.9.3, 5.3.0, 6.2.0 + MPICH 3.1.4 compiled with GCC 4.9.3 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + + Windows 10 x64 Visual Studio 2015 w/ Intel C/C++/Fortran 18 (cmake) + Visual Studio 2017 w/ Intel C/C++/Fortran 19 (cmake) + Visual Studio 2019 w/ clang 12.0.0 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2019 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2022 w/ clang 15.0.1 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) + + +Known Problems +============== + + CMake files do not behave correctly with paths containing spaces. + Do not use spaces in paths because the required escaping for handling spaces + results in very complex and fragile build files. + ADB - 2019/05/07 + + At present, metadata cache images may not be generated by parallel + applications. Parallel applications can read files with metadata cache + images, but since this is a collective operation, a deadlock is possible + if one or more processes do not participate. + + CPP ptable test fails on both VS2017 and VS2019 with Intel compiler, JIRA + issue: HDFFV-10628. This test will pass with VS2015 with Intel compiler. + + The subsetting option in ph5diff currently will fail and should be avoided. + The subsetting option works correctly in serial h5diff. + + Several tests currently fail on certain platforms: + MPI_TEST-t_bigio fails with spectrum-mpi on ppc64le platforms. + + MPI_TEST-t_subfiling_vfd and MPI_TEST_EXAMPLES-ph5_subfiling fail with + cray-mpich on theta and with XL compilers on ppc64le platforms. + + MPI_TEST_testphdf5_tldsc fails with cray-mpich 7.7 on cori and theta. + + Known problems in previous releases can be found in the HISTORY*.txt files + in the HDF5 source. Please report any new problems found to + help@hdfgroup.org. + + +CMake vs. Autotools installations +================================= +While both build systems produce similar results, there are differences. +Each system produces the same set of folders on linux (only CMake works +on standard Windows); bin, include, lib and share. Autotools places the +COPYING and RELEASE.txt file in the root folder, CMake places them in +the share folder. + +The bin folder contains the tools and the build scripts. Additionally, CMake +creates dynamic versions of the tools with the suffix "-shared". Autotools +installs one set of tools depending on the "--enable-shared" configuration +option. + build scripts + ------------- + Autotools: h5c++, h5cc, h5fc + CMake: h5c++, h5cc, h5hlc++, h5hlcc + +The include folder holds the header files and the fortran mod files. CMake +places the fortran mod files into separate shared and static subfolders, +while Autotools places one set of mod files into the include folder. 
Because +CMake produces a tools library, the header files for tools will appear in +the include folder. + +The lib folder contains the library files, and CMake adds the pkgconfig +subfolder with the hdf5*.pc files used by the bin/build scripts created by +the CMake build. CMake separates the C interface code from the fortran code by +creating C-stub libraries for each Fortran library. In addition, only CMake +installs the tools library. The names of the szip libraries are different +between the build systems. + +The share folder will have the most differences because CMake builds include +a number of CMake specific files for support of CMake's find_package and support +for the HDF5 Examples CMake project. + +The issues with the gif tool are: + HDFFV-10592 CVE-2018-17433 + HDFFV-10593 CVE-2018-17436 + HDFFV-11048 CVE-2020-10809 +These CVE issues have not yet been addressed and are avoided by not building +the gif tool by default. Enable building the High-Level tools with these options: + autotools: --enable-hlgiftools + cmake: HDF5_BUILD_HL_GIF_TOOLS=ON + + +%%%%1.14.0%%%% + +HDF5 version 1.14.0 released on 2022-12-28 +================================================================================ + + +INTRODUCTION +============ + +This document describes the differences between this release and the previous +HDF5 release. It contains information on the platforms tested and known +problems in this release. For more details check the HISTORY*.txt files in the +HDF5 source. + +Note that documentation in the links below will be updated at the time of each +final release. + +Links to HDF5 documentation can be found on The HDF5 web page: + + https://portal.hdfgroup.org/display/HDF5/HDF5 + +The official HDF5 releases can be obtained from: + + https://www.hdfgroup.org/downloads/hdf5/ + +Changes from Release to Release and New Features in the HDF5-1.13.x release series +can be found at: + + https://portal.hdfgroup.org/display/HDF5/Release+Specific+Information + +If you have any questions or comments, please send them to the HDF Help Desk: + + help@hdfgroup.org + + +CONTENTS +======== + +- New Features +- Support for new platforms and languages +- Bug Fixes since HDF5-1.12.0 +- Platforms Tested +- Known Problems +- CMake vs. Autotools installations + + +New Features +============ + + Configuration: + ------------- + - Removal of MPE support + + The ability to build with MPE instrumentation has been removed along with + the following configure options: + + Autotools: + --with-mpe= + + CMake has never supported building with MPE support. + + (DER - 2022/11/08) + + - Removal of dmalloc support + + The ability to build with dmalloc support has been removed along with + the following configure options: + + Autotools: + --with-dmalloc= + + CMake: + HDF5_ENABLE_USING_DMALLOC + + (DER - 2022/11/08) + + - Removal of memory allocation sanity checks configure options + + With the removal of the memory allocation sanity checks feature, the + following configure options are no longer necessary and have been + removed: + + Autotools: + --enable-memory-alloc-sanity-check + + CMake: + HDF5_MEMORY_ALLOC_SANITY_CHECK + HDF5_ENABLE_MEMORY_STATS + + (DER - 2022/11/03) + + - Add new CMake configuration variable HDF5_USE_GNU_DIRS + + HDF5_USE_GNU_DIRS (default OFF) selects the use of GNU Coding Standard install + directory variables by including the CMake module, GNUInstallDirs(see CMake + documentation for details). 
The HDF_DIR_PATHS macro in the HDFMacros.cmake file + sets various PATH variables for use during the build, test and install processes. + By default, the historical settings for these variables will be used. + + (ADB - 2022/10/21, GH-2175, GH-1716) + + - Update CMake minimum version to 3.18 + + Updated CMake minimum version from 3.12 to 3.18 and removed version checks + which were added for Windows features not yet available in version 3.12. Also + removed configure macros and code checks for old style code compile checks. + + (ADB - 2022/08/29, HDFFV-11329) + + - Correct the usage of CMAKE_Fortran_MODULE_DIRECTORY and where to + install Fortran mod files. + + The Fortran modules files, ending in .mod are files describing a + Fortran 90 (and above) module API and ABI. These are not like C + header files describing an API, they are compiler dependent and + arch dependent, and not easily readable by a human being. They are + nevertheless searched for in the includes directories by gfortran + (in directories specified with -I). + + Autotools configure uses the -fmoddir option to specify the folder. + CMake will use "mod" folder by default unless overridden by the CMake + variable; HDF5_INSTALL_MODULE_DIR. + + (ADB - 2022/07/21) + + - HDF5 memory allocation sanity checking is now off by default for + Autotools debug builds + + HDF5 can be configured to perform sanity checking on internal memory + allocations by adding heap canaries to these allocations. However, + enabling this option can cause issues with external filter plugins + when working with (reallocating/freeing/allocating and passing back) + buffers. + + Previously, this option was off by default for all CMake build types, + but only off by default for non-debug Autotools builds. Since debug + is the default build mode for HDF5 when built from source with + Autotools, this can result in surprising segfaults that don't occur + when an application is built against a release version of HDF5. + Therefore, this option is now off by default for all build types + across both CMake and Autotools. + + (JTH - 2022/03/01) + + - Reworked corrected path searched by CMake find_package command + + The install path for cmake find_package files had been changed to use + "share/cmake" + for all platforms. However setting the HDF5_ROOT variable failed to locate + the configuration files. The build variable HDF5_INSTALL_CMAKE_DIR is now + set to the /cmake folder. The location of the configuration + files can still be specified by the "HDF5_DIR" variable. + + (ADB - 2022/02/02) + + - CPack will now generate RPM/DEB packages. + + Enabled the RPM and DEB CPack generators on linux. In addition to + generating STGZ and TGZ packages, CPack will try to package the + library for RPM and DEB packages. This is the initial attempt and + may change as issues are resolved. + + (ADB - 2022/01/27) + + - Added new option to the h5cc scripts produced by CMake. + + Add -showconfig option to h5cc scripts to cat the + libhdf5.settings file to the standard output. + + (ADB - 2022/01/25) + + - CMake will now run the PowerShell script tests in test/ by default + on Windows. + + The test directory includes several shell script tests that previously + were not run by CMake on Windows. These are now run by default. + If TEST_SHELL_SCRIPTS is ON and PWSH is found, the PowerShell scripts + will execute. Similar to the bash scripts on unix platforms. + + (ADB - 2021/11/23) + + - Added new configure option to support building parallel tools. 
+ See Tools below (autotools - CMake): + --enable-parallel-tools HDF5_BUILD_PARALLEL_TOOLS + + (RAW - 2021/10/25) + + - Added new configure options to enable dimension scales APIs (H5DS*) to + use new object references with the native VOL connector (aka native HDF5 + library). New references are always used for non-native terminal VOL + connectors (e.g., DAOS). + + Autotools --enable-dimension-scales-with-new-ref + CMake HDF5_DIMENSION_SCALES_NEW_REF=ON + + (EIP - 2021/10/25, HDFFV-11180) + + - Refactored the utils folder. + + Added subfolder test and moved the 'swmr_check_compat_vfd.c file' + from test into utils/test. Deleted the duplicate swmr_check_compat_vfd.c + file in hl/tools/h5watch folder. Also fixed vfd check options. + + (ADB - 2021/10/18) + + - Changed autotools and CMake configurations to derive both + compilation warnings-as-errors and warnings-only-warn configurations + from the same files, 'config/*/*error*'. Removed redundant files + 'config/*/*noerror*'. + + (DCY - 2021/09/29) + + - Adds C++ Autotools configuration file for Intel + + * Checks for icpc as the compiler + * Sets std=c++11 + * Copies most non-warning flags from intel-flags + + (DER - 2021/06/02) + + - Adds C++ Autotools configuration file for PGI + + * Checks for pgc++ as the compiler name (was: pgCC) + * Sets -std=c++11 + * Other options basically match new C options (below) + + (DER - 2021/06/02) + + - Updates PGI C options + + * -Minform set to warn (was: inform) to suppress spurious messages + * Sets -gopt -O2 as debug options + * Sets -O4 as 'high optimization' option + * Sets -O0 as 'no optimization' option + * Removes specific settings for PGI 9 and 10 + + (DER - 2021/06/02) + + - A C++11-compliant compiler is now required to build the C++ wrappers + + CMAKE_CXX_STANDARD is now set to 11 when building with CMake and + -std=c++11 is added when building with clang/gcc via the Autotools. + + (DER - 2021/05/27) + + - CMake will now run the shell script tests in test/ by default + + The test directory includes several shell script tests that previously + were not run by CMake. These are now run by default. TEST_SHELL_SCRIPTS + has been set to ON and SH_PROGRAM has been set to bash (some test + scripts use bash-isms). Platforms without bash (e.g., Windows) will + ignore the script tests. + + (DER - 2021/05/23) + + - Removed unused HDF5_ENABLE_HSIZET option from CMake + + This has been unused for some time and has no effect. + + (DER - 2021/05/23) + + - CMake no longer builds the C++ library by default + + HDF5_BUILD_CPP_LIB now defaults to OFF, which is in line with the + Autotools build defaults. + + (DER - 2021/04/20) + + - Removal of pre-VS2015 work-arounds + + HDF5 now requires Visual Studio 2015 or greater, so old work-around + code and definitions have been removed, including: + + * + * snprintf and vsnprintf + * llround, llroundf, lround, lroundf, round, roundf + * strtoll and strtoull + * va_copy + * struct timespec + + (DER - 2021/03/22) + + - Add CMake variable HDF5_LIB_INFIX + + This infix is added to all library names after 'hdf5'. + e.g. the infix '_openmpi' results in the library name 'libhdf5_openmpi.so' + This name is used in packages on debian based systems. + (see https://packages.debian.org/jessie/amd64/libhdf5-openmpi-8/filelist) + + (barcode - 2021/03/22) + + - On macOS, Universal Binaries can now be built, allowing native execution on + both Intel and Apple Silicon (ARM) based Macs. 
+ + To do so, set CMAKE_OSX_ARCHITECTURES="x86_64;arm64" + + (SAM - 2021/02/07, github-311) + + - Added a configure-time option to control certain compiler warnings + diagnostics + + A new configure-time option was added that allows some compiler warnings + diagnostics to have the default operation. This is mainly intended for + library developers and currently only works for gcc 10 and above. The + diagnostics flags apply to C, C++ and Fortran compilers and will appear + in "H5 C Flags", H5 C++ Flags" and H5 Fortran Flags, respectively. They + will NOT be exported to h5cc, etc. + + The default is OFF, which will disable the warnings URL and color attributes + for the warnings output. ON will not add the flags and allow default behavior. + + Autotools: --enable-diags + + CMake: HDF5_ENABLE_BUILD_DIAGS + + (ADB - 2021/02/05, HDFFV-11213) + + - CMake option to build the HDF filter plugins project as an external project + + The HDF filter plugins project is a collection of registered compression + filters that can be dynamically loaded when needed to access data stored + in a hdf5 file. This CMake-only option allows the plugins to be built and + distributed with the hdf5 library and tools. Like the options for szip and + zlib, either a tgz file or a git repository can be specified for the source. + + The option was refactored to use the CMake FetchContent process. This allows + more control over the filter targets, but required external project command + options to be moved to a CMake include file, HDF5PluginCache.cmake. Also + enabled the filter examples to be used as tests for operation of the + filter plugins. + + (ADB - 2020/12/10, OESS-98) + + - FreeBSD Autotools configuration now defaults to 'cc' and 'c++' compilers + + On FreeBSD, the autotools defaulted to 'gcc' as the C compiler and did + not process C++ options. Since FreeBSD 10, the default compiler has + been clang (via 'cc'). + + The default compilers have been set to 'cc' for C and 'c++' for C++, + which will pick up clang and clang++ respectively on FreeBSD 10+. + Additionally, clang options are now set correctly for both C and C++ + and g++ options will now be set if that compiler is being used (an + omission from the former functionality). + + (DER - 2020/11/28, HDFFV-11193) + + - Fixed POSIX problems when building w/ gcc on Solaris + + When building on Solaris using gcc, the POSIX symbols were not + being set correctly, which could lead to issues like clock_gettime() + not being found. + + The standard is now set to gnu99 when building with gcc on Solaris, + which allows POSIX things to be #defined and linked correctly. This + differs slightly from the gcc norm, where we set the standard to c99 + and manually set POSIX #define symbols. + + (DER - 2020/11/25, HDFFV-11191) + + - Added a configure-time option to consider certain compiler warnings + as errors + + A new configure-time option was added that converts some compiler warnings + to errors. This is mainly intended for library developers and currently + only works for gcc and clang. The warnings that are considered errors + will appear in the generated libhdf5.settings file. These warnings apply + to C and C++ code and will appear in "H5 C Flags" and H5 C++ Flags", + respectively. They will NOT be exported to h5cc, etc. + + The default is OFF. Building with this option may fail when compiling + on operating systems and with compiler versions not commonly used by + the library developers. 
Compilation may also fail when headers not + under the control of the library developers (e.g., mpi.h, hdfs.h) raise + warnings. + + Autotools: --enable-warnings-as-errors + + CMake: HDF5_ENABLE_WARNINGS_AS_ERRORS + + (DER - 2020/11/23, HDFFV-11189) + + - Autotools and CMake target added to produce doxygen generated documentation + + The default is OFF (disabled). + The Autoconf option is '--enable-doxygen'. + The Autotools make target is 'doxygen' and will build all doxygen targets. + The CMake configure option is 'HDF5_BUILD_DOC'. + The CMake target 'doxygen' builds all available doxygen targets; + the CMake target 'hdf5lib_doc' builds the documentation for the src subdirectory. + + (ADB - 2020/11/03) + + - CMake option to use MSVC naming conventions with MinGW + + The HDF5_MSVC_NAMING_CONVENTION option enables the use of MSVC naming conventions + when using a MinGW toolchain. + + (xan - 2020/10/30) + + - CMake option to statically link gcc libs with MinGW + + HDF5_MINGW_STATIC_GCC_LIBS allows libg/libstdc++ to be statically linked + with the MinGW toolchain. + + (xan - 2020/10/30) + + - CMake option to build the HDF filter plugins project as an external project + + The HDF filter plugins project is a collection of registered compression + filters that can be dynamically loaded when needed to access data stored + in an HDF5 file. This CMake-only option allows the plugins to be built and + distributed with the HDF5 library and tools. Like the options for szip and + zlib, either a tgz file or a git repository can be specified for the source. + + The necessary options are (see the INSTALL_CMake.txt file): + HDF5_ENABLE_PLUGIN_SUPPORT + PLUGIN_TGZ_NAME or PLUGIN_GIT_URL + More options are necessary for various filters; the plugin project + documents should be referenced. + + (ADB - 2020/09/27, OESS-98) + + - Added CMake option to format source files + + The HDF5_ENABLE_FORMATTERS option enables the creation of targets using the + pattern HDF5_*_SRC_FORMAT, where * corresponds to the source folder + or tool folder. All sources can be formatted by executing the format target: + make format + + (ADB - 2020/08/24) + + - Add file locking configure and CMake options + + HDF5 1.10.0 introduced a file locking scheme, primarily to help + enforce SWMR setup. Formerly, the only user-level control of the scheme + was via the HDF5_USE_FILE_LOCKING environment variable. + + This change introduces configure-time options that control whether + or not file locking will be used and whether or not the library + ignores errors when locking has been disabled on the file system + (useful on some HPC Lustre installations). + + In both the Autotools and CMake, the settings have the effect of changing + the default property list settings (see the H5Pset/get_file_locking() + entry, below). + + The yes/no/best-effort file locking configure setting has also been + added to the libhdf5.settings file. + + Autotools: + + An --enable-file-locking=(yes|no|best-effort) option has been added. + + yes: Use file locking. + no: Do not use file locking. + best-effort: Use file locking and ignore "disabled" errors. + + CMake: + + Two self-explanatory options have been added: + + HDF5_USE_FILE_LOCKING + HDF5_IGNORE_DISABLED_FILE_LOCKS + + Setting both of these to ON is equivalent to the Autotools' + best-effort setting.
+ + NOTE: + The precedence order of the various file locking control mechanisms is: + + 1) HDF5_USE_FILE_LOCKING environment variable (highest) + + 2) H5Pset_file_locking() + + 3) configure/CMake options (which set the property list defaults) + + 4) library defaults (currently best-effort) + + (DER - 2020/07/30, HDFFV-11092) + + - CMake option to link the generated Fortran MOD files into the include + directory. + + The Fortran generation of MOD files by a Fortran compile can produce + different binary files between SHARED and STATIC compiles with different + compilers and/or different platforms. Note that it has been found that + different versions of Fortran compilers will produce incompatible MOD + files. Currently, CMake will locate these MOD files in subfolders of + the include directory and add that path to the Fortran library target + in the CMake config file, which can be used by the CMake find library + process. For other build systems using the binary from a CMake install, + a new CMake configuration can be used to copy the pre-chosen version + of the Fortran MOD files into the install include directory. + + The default will depend on the configuration of + BUILD_STATIC_LIBS and BUILD_SHARED_LIBS: + YES YES Default to SHARED + YES NO Default to STATIC + NO YES Default to SHARED + NO NO Default to SHARED + The defaults can be overridden by setting the config option + HDF5_INSTALL_MOD_FORTRAN to one of NO, SHARED, or STATIC + + (ADB - 2020/07/09, HDFFV-11116) + + - CMake option to use AEC (open source SZip) library instead of SZip + + The open source AEC library is a replacement library for SZip. In + order to use it for hdf5 the libaec CMake source was changed to add + "-fPIC" and exclude test files. Autotools does not build the + compression libraries within hdf5 builds. New option USE_LIBAEC is + required to compensate for the different files produced by AEC build. + + (ADB - 2020/04/22, OESS-65) + + - CMake ConfigureChecks.cmake file now uses CHECK_STRUCT_HAS_MEMBER + + Some handcrafted tests in HDFTests.c has been removed and the CMake + CHECK_STRUCT_HAS_MEMBER module has been used. + + (ADB - 2020/03/24, TRILAB-24) + + - Both build systems use same set of warnings flags + + GNU C, C++ and gfortran warnings flags were moved to files in a config + sub-folder named gnu-warnings. Flags that only are available for a specific + version of the compiler are in files named with that version. + Clang C warnings flags were moved to files in a config sub-folder + named clang-warnings. + Intel C, Fortran warnings flags were moved to files in a config sub-folder + named intel-warnings. + + There are flags in named "error-xxx" files with warnings that may + be promoted to errors. Some source files may still need fixes. + + There are also pairs of files named "developer-xxx" and "no-developer-xxx" + that are chosen by the CMake option:HDF5_ENABLE_DEV_WARNINGS or the + configure option:--enable-developer-warnings. + + In addition, CMake no longer applies these warnings for examples. + + (ADB - 2020/03/24, TRILAB-192) + + + Library: + -------- + - Overhauled the Virtual Object Layer (VOL) + + The virtual object layer (VOL) was added in HDF5 1.12.0 but the initial + implementation required API-breaking changes to better support optional + operations and pass-through VOL connectors. The original VOL API is + now considered deprecated and VOL users and connector authors should + target the 1.14 VOL API. 
+ + The specific changes are too extensive to document in a release note, so + VOL users and connector authors should consult the updated VOL connector + author's guide and the 1.12-1.14 VOL migration guide. + + (DER - 2022/12/28) + + - H5VLquery_optional() signature change + + The last parameter of this API call has changed from a pointer to hbool_t + to a pointer to uint64_t. Due to the changes in how optional operations + are handled in the 1.14 VOL API, we cannot make the old API call work + with the new scheme, so there is no API compatibility macro for it. + + (DER - 2022/12/28) + + - H5I_free_t callback signature change + + In order to support asynchronous operations and future IDs, the signature + of the H5I_free_t callback has been modified to take a second 'request' + parameter. Due to the nature of the internal library changes, no API + compatibility macro is available for this change. + + (DER - 2022/12/28) + + - Fix for CVE-2019-8396 + + Malformed HDF5 files may have truncated content which does not match + the expected size. When H5O__pline_decode() attempts to decode these, it + may read past the end of the allocated space, leading to heap overflows, + as bounds checking was incomplete. + + The fix ensures each element is within bounds before reading. + + (2022/11/09 - HDFFV-10712, CVE-2019-8396, GitHub #2209) + + - Removal of memory allocation sanity checks feature + + This feature added heap canaries and statistics tracking for internal + library memory operations. Unfortunately, the heap canaries caused + problems when library memory operations were mixed with standard C + library memory operations (such as in the filter pipeline, where + buffers may have to be reallocated). Since any platform with a C + compiler also usually has much more sophisticated memory sanity + checking tools than the HDF5 library provided (e.g., valgrind), we + have decided to remove the feature entirely. + + In addition to the configure changes described above, this also removes + the following from the public API: + H5get_alloc_stats() + H5_alloc_stats_t + + (DER - 2022/11/03) + + - Added multi dataset I/O feature + + Added H5Dread_multi, H5Dread_multi_async, H5Dwrite_multi, and + H5Dwrite_multi_async API routines to allow I/O on multiple datasets with a + single API call. Added H5Dread_multi_f and H5Dwrite_multi_f Fortran + wrappers. Updated VOL callbacks for dataset I/O to support multi dataset + I/O. + + (NAF - 2022/10/19) + + - Onion VFD + + The onion VFD allows creating "versioned" HDF5 files. File open/close + operations after initial file creation will add changes to an external + "onion" file (.onion extension by default) instead of the original file. + Each written revision can be opened independently. + + To open a file with the onion VFD, use the H5Pset_fapl_onion() API call + (does not need to be used for the initial creation of the file). The + options for the H5FD_onion_fapl_info_t struct are described in H5FDonion.h. + + The H5FDonion_get_revision_count() API call can be used to query a file + to find out how many revisions have been created. + + (DER - 2022/08/02) + + - Subfiling VFD + + The HDF5 Subfiling VFD is a new MPI-based file driver that allows an + HDF5 application to distribute an HDF5 file across a collection of + "sub-files" in equal-sized data segment "stripes". I/O to the logical + HDF5 file is then directed to the appropriate "sub-file" according to + the Subfiling configuration and a system of I/O concentrators, which + are MPI ranks operating worker threads.
+ + By allowing a configurable stripe size, number of I/O concentrators and + method for selecting MPI ranks as I/O concentrators, the Subfiling VFD + aims to enable an HDF5 application to find a middle ground between the + single shared file and file-per-process approaches to parallel file I/O + for the particular machine the application is running on. In general, the + goal is to avoid some of the complexity of the file-per-process approach + while also minimizing the locking issues of the single shared file approach + on a parallel file system. + + Also included with the Subfiling VFD is a new h5fuse.sh script which + reads a Subfiling configuration file and then combines the various + sub-files back into a single HDF5 file. By default, the h5fuse.sh script + looks in the current directory for the Subfiling configuration file, + but can also be pointed to the configuration file with a command-line + option. + + The Subfiling VFD can be used by calling H5Pset_fapl_subfiling() on a + File Access Property List and using that FAPL for file operations. Note + that the Subfiling VFD currently has the following limitations: + + * Does not currently support HDF5 collective I/O, other than collective + metadata writes and reads as set by H5Pset_coll_metadata_write() and + H5Pset_all_coll_metadata_ops() + + * The Subfiling VFD should not currently be used with an HDF5 library + that has been built with thread-safety enabled. This can cause deadlocks + when failures occur due to interactions between the VFD's internal + threads and HDF5's global lock. + + (JTH - 2022/07/22) + + - Add a new public function, H5ESget_requests() + + This function allows the user to retrieve request pointers from an event + set. It is intended for use primarily by VOL plugin developers. + + (NAF - 2022/01/11) + + - Adds new file driver-level memory copy operation for + "ctl" callback and updates compact dataset I/O routines + to utilize it + + When accessing an HDF5 file with a file driver that uses + memory allocated in special ways (e.g., without standard + library's `malloc`), a crash could be observed when HDF5 + tries to perform `memcpy` operations on such a memory + region. + + These changes add a new H5FD_FEAT_MEMMANAGE VFD feature + flag, which, if specified as supported by a VFD, will + inform HDF5 that the VFD either uses special memory + management routines or wishes to perform memory management + in a specific way. Therefore, this flag instructs HDF5 to + ask the file driver to perform memory management for + certain operations. + + These changes also introduce a new "ctl" callback + operation identified by the H5FD_CTL__MEM_COPY op code. + This operation simply asks a VFD to perform a memory copy. + The arguments to this operation are passed to the "ctl" + callback's "input" parameter as a pointer to a struct + defined as: + + struct H5FD_ctl_memcpy_args_t { + void * dstbuf; /**< Destination buffer */ + hsize_t dst_off; /**< Offset within destination buffer */ + const void *srcbuf; /**< Source buffer */ + hsize_t src_off; /**< Offset within source buffer */ + size_t len; /**< Length of data to copy from source buffer */ + } H5FD_ctl_memcpy_args_t; + + Further, HDF5's compact dataset I/O routines were + identified as a problematic area that could cause a crash + for VFDs that make use of special memory management. Those + I/O routines were therefore updated to make use of this new + "ctl" callback operation in order to ask the underlying + file driver to correctly handle memory copies. 
+ + (JTH - 2021/09/28) + + - Adds new "ctl" callback to VFD H5FD_class_t structure + with the following prototype: + + herr_t (*ctl)(H5FD_t *file, uint64_t op_code, + uint64_t flags, const void *input, + void **output); + + This newly-added "ctl" callback allows Virtual File + Drivers to intercept and handle arbitrary operations + identified by an operation code. Its parameters are + as follows: + + `file` [in] - A pointer to the file to be operated on + `op_code` [in] - The operation code identifying the + operation to be performed + `flags` [in] - Flags governing the behavior of the + operation performed (see H5FDpublic.h + for a list of valid flags) + `input` [in] - A pointer to arguments passed to the + VFD performing the operation + `output` [out] - A pointer for the receiving VFD to + use for output from the operation + + (JRM - 2021/08/16) + + - Change how the release part of version, in major.minor.release is checked + for compatibility + + The HDF5 library uses a function, H5check_version, to check that + the version defined in the header files, which is used to compile an + application is compatible with the version codified in the library, which + the application loads at runtime. This previously required an exact match + or the library would print a warning, dump the build settings and then + abort or continue. An environment variable controlled the logic. + + Now the function first checks that the library release version, in + major.minor.release, is not older than the version in the headers. + Secondly, if the release version is different, it checks if either + the library version or the header version is in the exception list, in + which case the release part of version, in major.minor.release, must + be exact. An environment variable still controls the logic. + + (ADB - 2021/07/27) + + - gcc warning suppression macros were moved out of H5public.h + + The HDF5 library uses a set of macros to suppress warnings on gcc. + These warnings were originally located in H5public.h so that the + multi VFD (which only uses public headers) could also make use of them + but internal macros should not be publicly exposed like this. + + These macros have now been moved to H5private.h. Pending future multi + VFD refactoring, the macros have been duplicated in H5FDmulti.c to + suppress the format string warnings there. + + (DER - 2021/06/03) + + - H5Gcreate1() now rejects size_hint parameters larger than UINT32_MAX + + The size_hint value is ultimately stored in a uint32_t struct field, + so specifying a value larger than this on a 64-bit machine can cause + undefined behavior including crashing the system. + + The documentation for this API call was also incorrect, stating that + passing a negative value would cause the library to use a default + value. Instead, passing a "negative" value actually passes a very large + value, which is probably not what the user intends and can cause + crashes on 64-bit systems. + + The Doxygen documentation has been updated and passing values larger + than UINT32_MAX for size_hint will now produce a normal HDF5 error. + + (DER - 2021/04/29, HDFFV-11241) + + + - H5Pset_fapl_log() no longer crashes when passed an invalid fapl ID + + When passed an invalid fapl ID, H5Pset_fapl_log() would usually + segfault when attempting to free an uninitialized pointer in the error + handling code. This behavior is more common in release builds or + when the memory sanitization checks were not selected as a build + option. 
+ + The pointer is now correctly initialized and the API call now + produces a normal HDF5 error when fed an invalid fapl ID. + + (DER - 2021/04/28, HDFFV-11240) + + - Fixes a segfault when H5Pset_mdc_log_options() is called multiple times + + The call incorrectly attempts to free an internal copy of the previous + log location string, which causes a segfault. This only happens + when the call is invoked multiple times on the same property list. + On the first call to a given fapl, the log location is set to NULL so + the segfault does not occur. + + The string is now handled properly and the segfault no longer occurs. + + (DER - 2021/04/27, HDFFV-11239) + + - HSYS_GOTO_ERROR now emits the results of GetLastError() on Windows + + HSYS_GOTO_ERROR is an internal macro that is used to produce error + messages when system calls fail. These strings include errno and the + associated strerror() value, which are not particularly useful + when a Win32 API call fails. + + On Windows, this macro has been updated to include the result of + GetLastError(). When a system call fails on Windows, usually only + one of errno and GetLastError() will be useful; however, we emit both + for the user to parse. The Windows error message is not emitted as + it would be awkward to free the FormatMessage() buffer given the + existing HDF5 error framework. Users will have to look up the error + codes in MSDN. + + The format string on Windows has been changed from: + + "%s, errno = %d, error message = '%s'" + + to: + + "%s, errno = %d, error message = '%s', Win32 GetLastError() = %"PRIu32"" + + for those inclined to parse it for error values. + + (DER - 2021/03/21) + + - File locking now works on Windows + + Since version 1.10.0, the HDF5 library has used a file locking scheme + to help enforce one writer at a time accessing an HDF5 file, which can + be helpful when setting up readers and writers to use the single- + writer/multiple-readers (SWMR) access pattern. + + In the past, this was only functional on POSIX systems where flock() or + fcntl() were present. Windows used a no-op stub that always succeeded. + + HDF5 now uses LockFileEx() and UnlockFileEx() to lock the file using the + same scheme as POSIX systems. We lock the entire file when we set up the + locks (by passing DWORDMAX as both size parameters to LockFileEx()). + + (DER - 2021/03/19, HDFFV-10191) + + - H5Epush_ret() now requires a trailing semicolon + + H5Epush_ret() is a function-like macro that has been changed to + contain a `do {} while(0)` loop. Consequently, a trailing semicolon + is now required to end the `while` statement. Previously, a trailing + semicolon would work but was not mandatory. This change was made to allow + clang-format to correctly format the source code. + + (SAM - 2021/03/03) + + - Improved performance of H5Sget_select_elem_pointlist + + Modified the library to cache the point after the last block of points + retrieved by H5Sget_select_elem_pointlist, so a subsequent call to the + same function to retrieve the next block of points from the list can + proceed immediately without needing to iterate over the point list. + + (NAF - 2021/01/19) + + - Replaced H5E_ATOM with H5E_ID in H5Epubgen.h + + The term "atom" is archaic and not in line with current HDF5 library + terminology, which uses "ID" instead. "Atom" has mostly been purged + from the library internals and this change removes H5E_ATOM from + H5Epubgen.h (exposed via H5Epublic.h) and replaces it with + H5E_ID.
+ + (DER - 2020/11/24, HDFFV-11190) + + - Add a new public function H5Ssel_iter_reset + + This function resets a dataspace selection iterator back to an + initial state so that it may be used for iteration once more. + This can be useful when needing to iterate over a selection + multiple times without having to repeatedly create/destroy + a selection iterator for that dataspace selection. + + (JTH - 2020/09/18) + + - Remove HDFS VFD stubs + + The original implementation of the HDFS VFD included non-functional + versions of the following public API calls when the HDFS VFD is + not built as a part of the HDF5 library: + + * H5FD_hdfs_init() + * H5Pget_fapl_hdfs() + * H5Pset_fapl_hdfs() + + They will remain present in HDF5 1.10 and HDF5 1.12 releases + for binary compatibility purposes but have been removed as of 1.14.0. + + Note that this has nothing to do with the real HDFS VFD API calls + that are fully functional when the HDFS VFD is configured and built. + + We simply changed: + + #ifdef LIBHDFS + + #else + + #endif + + to: + + #ifdef LIBHDFS + + #endif + + Which is how the other optional VFDs are handled. + + (DER - 2020/08/27) + + - Add Mirror VFD + + Use TCP/IP sockets to perform write-only (W/O) file I/O on a remote + machine. Must be used in conjunction with the Splitter VFD. + + (JOS - 2020/03/13, TBD) + + - Add Splitter VFD + + Maintain separate R/W and W/O channels for "concurrent" file writes + to two files using a single HDF5 file handle. + + (JOS - 2020/03/13, TBD) + + + Parallel Library: + ----------------- + - Several improvements to parallel compression feature, including: + + * Improved support for collective I/O (for both writes and reads) + + * Significant reduction of memory usage for the feature as a whole + + * Reduction of copying of application data buffers passed to H5Dwrite + + * Addition of support for incremental file space allocation for filtered + datasets created in parallel. Incremental file space allocation is the + default for these types of datasets (early file space allocation is + also still supported), while early file space allocation is still the + default (and only supported at allocation time) for unfiltered datasets + created in parallel. Incremental file space allocation should help with + parallel HDF5 applications that wish to use fill values on filtered + datasets, but would typically avoid doing so since dataset creation in + parallel would often take an excessive amount of time. Since these + datasets previously used early file space allocation, HDF5 would + allocate space for and write fill values to every chunk in the dataset + at creation time, leading to noticeable overhead. Instead, with + incremental file space allocation, allocation of file space for chunks + and writing of fill values to those chunks will be delayed until each + individual chunk is initially written to. 
+ + * Addition of support for HDF5's "don't filter partial edge chunks" flag + (https://portal.hdfgroup.org/display/HDF5/H5P_SET_CHUNK_OPTS) + + * Addition of proper support for HDF5 fill values with the feature + + * Addition of 'H5_HAVE_PARALLEL_FILTERED_WRITES' macro to H5pubconf.h + so HDF5 applications can determine at compile-time whether the feature + is available + + * Addition of simple examples (ph5_filtered_writes.c and + ph5_filtered_writes_no_sel.c) under examples directory to demonstrate + usage of the feature + + * Improved coverage of regression testing for the feature + + (JTH - 2022/2/23) + + + Fortran Library: + ---------------- + - Added pointer based H5Dfill_f API + + Added Fortran H5Dfill_f, which is fully equivalent to the C API. It accepts pointers, + fill value datatype and datatype of dataspace elements. + + (MSB - 2022/10/10, HDFFV-10734.) + + - H5Fget_name_f fixed to handle correctly trailing whitespaces and + newly allocated buffers. + + (MSB - 2021/08/30, github-826,972) + + - Add wrappers for H5Pset/get_file_locking() API calls + + h5pget_file_locking_f() + h5pset_file_locking_f() + + See the configure option discussion for HDFFV-11092 (above) for more + information on the file locking feature and how it's controlled. + + (DER - 2020/07/30, HDFFV-11092) + + + C++ Library: + ------------ + - Added two new constructors to H5::H5File class + + Two new constructors were added to allow opening a file with non-default + access property list. + + - Add wrappers for H5Pset/get_file_locking() API calls + + FileAccPropList::setFileLocking() + FileAccPropList::getFileLocking() + + See the configure option discussion for HDFFV-11092 (above) for more + information on the file locking feature and how it's controlled. + + (DER - 2020/07/30, HDFFV-11092) + + + Java Library: + ------------- + - Added version of H5Rget_name to return the name as a Java string. + + Other functions that get_name process the get_size then get the name + within the JNI implementation. Now H5Rget_name has a H5Rget_name_string. + + (ADB - 2022/07/12) + + - Added reference support to H5A and H5D read write vlen JNI functions. + + Added the implementation to handle VL references as an Array of Lists + of byte arrays. + + The JNI wrappers translate the Array of Lists to/from the hvl_t vlen + structures. The wrappers use the specified datatype arguments for the + List type translation, it is expected that the Java type is correct. + + (ADB - 2022/07/11, HDFFV-11318) + + - H5A and H5D read write vlen JNI functions were incorrect. + + Corrected the vlen function implementations for the basic primitive types. + The VLStrings functions now correctly use the implementation that had been + the VL functions. (VLStrings functions did not have an implementation.) + The new VL functions implementation now expect an Array of Lists between + Java and the JNI wrapper. + + The JNI wrappers translate the Array of Lists to/from the hvl_t vlen + structures. The wrappers use the specified datatype arguments for the + List type translation, it is expected that the Java type is correct. + + (ADB - 2022/07/07, HDFFV-11310) + + - H5A and H5D read write JNI functions had flawed vlen datatype check. + + Adapted tools function for JNI utils file. This reduced multiple calls + to a single check and variable. The variable can then be used to call + the H5Treclaim function. Adjusted existing test and added new test. 
+ + (ADB - 2022/06/22) + + - Replaced HDF5AtomException with HDF5IdException + + Since H5E_ATOM changed to H5E_ID in the C library, the Java exception + that wraps the error category was also renamed. Its functionality + remains unchanged aside from the name. + + (See also the HDFFV-11190 note in the C library section) + + (DER - 2020/11/24, HDFFV-11190) + + - Added new H5S functions. + + H5Sselect_copy, H5Sselect_shape_same, H5Sselect_adjust, + H5Sselect_intersect_block, H5Sselect_project_intersection, + H5Scombine_hyperslab, H5Smodify_select, H5Scombine_select + wrapper functions added. + + (ADB - 2020/10/27, HDFFV-10868) + + - Add wrappers for H5Pset/get_file_locking() API calls + + H5Pset_file_locking() + H5Pget_use_file_locking() + H5Pget_ignore_disabled_file_locking() + + Unlike the C++ and Fortran wrappers, there are separate getters for the + two file locking settings, each of which returns a boolean value. + + See the configure option discussion for HDFFV-11092 (above) for more + information on the file locking feature and how it's controlled. + + (DER - 2020/07/30, HDFFV-11092) + + + Tools: + ------ + - Building h5perf/h5perf_serial in "standalone mode" has been removed + + Building h5perf separately from the library was added circa 2008 + in HDF5 1.6.8. It's unclear what purpose this serves, and the + implementation is currently broken. The existing files require + H5private.h and the symbols we use to determine how the copied + platform-independence scheme should be used come from H5pubconf.h, + which may not match the compiler being used to build standalone h5perf. + + Due to the maintenance overhead and lack of a clear use case, support + for building h5perf and h5perf_serial separately from the HDF5 library + has been removed. + + (DER - 2022/07/15) + + - The perf tool has been removed + + The small `perf` tool didn't really do anything special and the name + conflicts with GNU's perf tool. + + (DER - 2022/07/15, GitHub #1787) + + - 1.10 References in containers were not displayed properly by h5dump. + + Ported the 1.10 tools display function to provide the ability to inspect and + display 1.10 reference data. + + (ADB - 2022/06/22) + + - h5repack added an optional verbose value for reporting R/W timing. + + In addition to adding timing capture around the read/write calls in + h5repack, added help text to indicate how to show timing for read/write: + -v N, --verbose=N Verbose mode, print object information. + N is an integer greater than 1; a value of 2 displays read/write timing. + (ADB - 2021/11/08) + + - Added a new (unix ONLY) parallel meta tool 'h5dwalk', which utilizes the + mpifileutils (https://hpc.github.io/mpifileutils) open source utility + library to enable parallel execution of other HDF5 tools. + This approach can greatly enhance the serial HDF5 tool performance over large + collections of files by utilizing MPI parallelism to distribute an application + load over many independent MPI ranks and files. + + An introduction to the mpifileutils library and initial 'User Guide' for + the new 'h5dwalk' tool can be found at: + https://github.com/HDFGroup/hdf5doc/tree/master/RFCs/HDF5/tools/parallel_tools + + (RAW - 2021/10/25) + + - Refactored the perform tools and removed their dependency on the test library. + + Moved the perf and h5perf tools from tools/test/perform to + tools/src/h5perf so that they can be installed. This required + that the test library dependency be removed by copying the + needed functions from h5test.c.
+ The standalone scripts and other perform tools remain in the + tools/test/perform folder. + + (ADB - 2021/08/10) + + - Removed partial long exceptions + + Some of the tools accepted shortened versions of the long options + (ex: --datas instead of --dataset). These were implemented inconsistently, + are difficult to maintain, and occasionally block useful long option + names. These partial long options have been removed from all the tools. + + (DER - 2021/08/03) + + - h5repack added help text for user-defined filters. + + Added help text line that states the valid values of the filter flag + for user-defined filters; + filter_flag: 1 is OPTIONAL or 0 is MANDATORY + + (ADB - 2021/01/14, HDFFV-11099) + + - Added h5delete tool + + Deleting HDF5 storage when using the VOL can be tricky when the VOL + does not create files. The h5delete tool is a simple wrapper around + the H5Fdelete() API call that uses the VOL specified in the + HDF5_VOL_CONNECTOR environment variable to delete a "file". If + the call to H5Fdelete() fails, the tool will attempt to use + the POSIX remove(3) call to remove the file. + + Note that the HDF5 library does currently have support for + H5Fdelete() in the native VOL connector. + + (DER - 2020/12/16) + + - h5repack added options to control how external links are handled. + + Currently h5repack preserves external links and cannot copy and merge + data from the external files. Two options, merge and prune, were added to + control how to merge data from an external link into the resulting file. + --merge Follow external soft link recursively and merge data. + --prune Do not follow external soft links and remove link. + --merge --prune Follow external link, merge data and remove dangling link. + + (ADB - 2020/08/05, HDFFV-9984) + + - h5repack was fixed to repack the reference attributes properly. + The code line that checks if the update of reference inside a compound + datatype is misplaced outside the code block loop that carries out the + check. In consequence, the next attribute that is not the reference + type was repacked again as the reference type and caused the failure of + repacking. The fix is to move the corresponding code line to the correct + code block. + + (KY -2020/02/07, HDFFV-11014) + + + High-Level APIs: + ---------------- + - added set/get for unsigned long long attributes + + The attribute writing high-level API has been expanded to include + public set/get functions for ULL attributes, analogously to the + existing set/get for other types. + + (AF - 2021/09/08) + + + C Packet Table API: + ------------------- + - + + + Internal header file: + --------------------- + - All the #defines named H5FD_CTL__* were renamed to H5FD_CTL_*, i.e. the double underscore was reduced to a single underscore. + + + Documentation: + -------------- + - Doxygen User Guide documentation is available when configured and generated. + The resulting documentation files will be in the share/html subdirectory + of the HDF5 install directory. + + (ADB - 2022/08/09) + + +Support for new platforms, languages and compilers +================================================== + - + + +Bug Fixes since HDF5-1.12.0 release +=================================== + Library + ------- + - Seg fault on file close + + h5debug fails at file close with core dump on a file that has an + illegal file size in its cache image. In H5F_dest(), the library + performs all the closing operations for the file and keeps track of + the error encountered when reading the file cache image. 
+      At the end of the routine, it frees the file's file structure and
+      returns an error. Due to the error return, the file object is not removed
+      from the ID node table. This eventually causes an assertion failure in
+      H5VL__native_file_close() when the library finally exits and tries to
+      access that file object in the table for closing.
+
+      The closing routine, H5F_dest(), will not free the file structure if
+      there is an error, keeping a valid file structure in the ID node table.
+      It will be freed later in H5VL__native_file_close() when the
+      library exits and terminates the file package.
+
+      (VC - 2022/12/14, HDFFV-11052, CVE-2020-10812)
+
+    - Fix CVE-2018-13867 / GHSA-j8jr-chrh-qfrf
+
+      Validate the location (offset) of the accumulated metadata when comparing.
+
+      Initially, the accumulated metadata location is initialized to HADDR_UNDEF
+      (the highest available address). Bogus input files may provide a location
+      or size matching this value, and comparing this address against such bogus
+      values may produce false positives. Thus, make sure the value has been
+      initialized, or fail the comparison early and let other parts of the
+      code deal with the bogus address/size.
+      Note: To avoid unnecessary checks, it is assumed that if the 'dirty'
+      member in the same structure is true, the location is valid.
+
+      (EFE - 2022/10/10 GH-2230)
+
+    - Fix CVE-2018-16438 / GHSA-9xmm-cpf8-rgmx
+
+      Make sure the info block for external links has at least 3 bytes.
+
+      According to the specification, the information block for external links
+      contains 1 byte of version/flag information and two 0-terminated strings
+      for the object linked to and the full path.
+      Although not very useful, the minimum string length for each (with the
+      terminating 0) would be one byte.
+      Checking this helps to avoid SEGVs triggered by bogus files.
+
+      (EFE - 2022/10/09 GH-2233)
+
+    - Fix CVE-2021-46244 / GHSA-vrxh-5gxg-rmhm
+
+      Compound datatypes may not have members of size 0.
+
+      A member size of 0 may lead to an FPE later on, as reported in
+      CVE-2021-46244. To avoid this, check for it as soon as the
+      member is decoded.
+
+      (EFE - 2022/10/05 GH-2242)
+
+    - Fix CVE-2021-45830 / GHSA-5h2h-fjjr-x9m2
+
+      Make H5O__fsinfo_decode() more resilient to out-of-bounds reads.
+
+      When decoding a file space info message in H5O__fsinfo_decode(), make
+      sure each element to be decoded is still within the message. Malformed
+      HDF5 files may have truncated content which does not match the
+      expected size. Checking this prevents attempts to decode
+      unrelated data and heap overflows. So far, only free space manager
+      address data was checked before decoding.
+
+      (EFE - 2022/10/05 GH-2228)
+
+    - Fix CVE-2021-46242 / GHSA-x9pw-hh7v-wjpf
+
+      When evicting the driver info block, NULL the corresponding entry.
+
+      Since H5C_expunge_entry() (called from H5AC_expunge_entry()) sets the flag
+      H5C__FLUSH_INVALIDATE_FLAG, the driver info block will be freed. NULLing
+      the pointer in f->shared->drvinfo will prevent use-after-free when it is
+      used in other functions (like H5F__dest()), as other places check
+      whether the pointer is initialized before using its value.
+
+      (EFE - 2022/09/29 GH-2254)
+
+    - Fix CVE-2021-45833 / GHSA-x57p-jwp6-4v79
+
+      Report an error if the dimensionality of chunked storage in a data
+      layout message is < 2.
+
+      For Data Layout Messages versions 1 & 2, the specification states
+      that the value stored in the data field is 1 greater than the
+      number of dimensions in the dataspace.
For version 3 this is + not explicitly stated but the implementation suggests it to be + the case. + Thus the set value needs to be at least 2. For dimensionality + < 2 an out-of-bounds access occurs. + + (EFE - 2022/09/28 GH-2240) + + - Fix CVE-2018-14031 / GHSA-2xc7-724c-r36j + + Parent of enum datatype message must have the same size as the + enum datatype message itself. + Functions accessing the enumeration values use the size of the + enumeration datatype to determine the size of each element and + how much data to copy. + Thus the size of the enumeration and its parent need to match. + Check in H5O_dtype_decode_helper() to avoid unpleasant surprises + later. + + (EFE - 2022/09/28 GH-2236) + + - Fix CVE-2018-17439 / GHSA-vcxv-vp43-rch7 + + H5IMget_image_info(): Make sure to not exceed local array size + + Malformed hdf5 files may provide more dimensions than the array dim[] in + H5IMget_image_info() is able to hold. Check number of elements first by calling + H5Sget_simple_extent_dims() with NULL for both 'dims' and 'maxdims' arguments. + This will cause the function to return only the number of dimensions. + The fix addresses a stack overflow on write. + + (EFE - 2022/09/27 HDFFV-10589, GH-2226) + + - Fixed an issue with variable length attributes + + Previously, if a variable length attribute was held open while its file + was opened through another handle, the same attribute was opened through + the second file handle, and the second file and attribute handles were + closed, attempting to write to the attribute through the first handle + would cause an error. + + (NAF - 2022/10/24) + + - Memory leak + + A memory leak was observed with variable-length fill value in + H5O_fill_convert() function in H5Ofill.c. The leak is + manifested by running valgrind on test/set_extent.c. + + Previously, fill->buf is used for datatype conversion + if it is large enough and the variable-length information + is therefore lost. A buffer is now allocated regardless + so that the element in fill->buf can later be reclaimed. + + (VC - 2022/10/10, HDFFV-10840) + + - Fixed an issue with hyperslab selections + + Previously, when combining hyperslab selections, it was possible for the + library to produce an incorrect combined selection. + + (NAF - 2022/09/25) + + - Fixed an issue with attribute type conversion with compound datatypes + + Previously, when performing type conversion for attribute I/O with a + compound datatype, the library would not fill the background buffer with + the contents of the destination, potentially causing data to be lost when + only writing to a subset of the compound fields. + + (NAF - 2022/08/22, GitHub #2016) + + - The offset parameter in H5Dchunk_iter() is now scaled properly + + In earlier HDF5 1.13.x versions, the chunk offset was not scaled by the + chunk dimensions. This offset parameter in the callback now matches + that of H5Dget_chunk_info(). + + (@mkitti - 2022/08/06, GitHub #1419) + + - Converted an assertion on (possibly corrupt) file contents to a normal + error check + + Previously, the library contained an assertion check that a read superblock + doesn't contain a superblock extension message when the superblock + version < 2. When a corrupt HDF5 file is read, this assertion can be triggered + in debug builds of HDF5. In production builds, this situation could cause + either a library error or a crash, depending on the platform. 
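+      A minimal sketch (with a hypothetical file name) of how an application
+      can now treat this condition as an ordinary, catchable HDF5 error
+      rather than an assertion failure or crash:
+
+          #include <stdio.h>
+          #include "hdf5.h"
+
+          int main(void)
+          {
+              hid_t fid = H5I_INVALID_HID;
+
+              /* Suppress the automatic error printing for this call only. */
+              H5E_BEGIN_TRY
+              {
+                  fid = H5Fopen("corrupt.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
+              }
+              H5E_END_TRY;
+
+              if (fid < 0)
+                  fprintf(stderr, "open failed with a normal library error\n");
+              else
+                  H5Fclose(fid);
+
+              return 0;
+          }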
+ + (JTH - 2022/07/08, HDFFV-11316/HDFFV-11317) + + - Fixed a metadata cache bug when resizing a pinned/protected cache entry + + When resizing a pinned/protected cache entry, the metadata + cache code previously would wait until after resizing the + entry to attempt to log the newly-dirtied entry. This + caused H5C_resize_entry to mark the entry as dirty and made + H5AC_resize_entry think that it didn't need to add the + newly-dirtied entry to the dirty entries skiplist. + + Thus, a subsequent H5AC__log_moved_entry would think it + needed to allocate a new entry for insertion into the dirty + entry skip list, since the entry didGn't exist on that list. + This caused an assertion failure, as the code to allocate a + new entry assumes that the entry is not dirty. + + (JRM - 2022/02/28) + + - Issue #1436 identified a problem with the H5_VERS_RELEASE check in the + H5check_version function. + + Investigating the original fix, #812, we discovered some inconsistencies + with a new block added to check H5_VERS_RELEASE for incompatibilities. + This new block was not using the new warning text dealing with the + H5_VERS_RELEASE check and would cause the warning to be duplicated. + + By removing the H5_VERS_RELEASE argument in the first check for + H5_VERS_MAJOR and H5_VERS_MINOR, the second check would only check + the H5_VERS_RELEASE for incompatible release versions. This adheres + to the statement that except for the develop branch, all release versions + in a major.minor maintenance branch should be compatible. The prerequisite + is that an application will not use any APIs not present in all release versions. + + (ADB - 2022/02/24, #1438) + + - Unified handling of collective metadata reads to correctly fix old bugs + + Due to MPI-related issues occurring in HDF5 from mismanagement of the + status of collective metadata reads, they were forced to be disabled + during chunked dataset raw data I/O in the HDF5 1.10.5 release. This + wouldn't generally have affected application performance because HDF5 + already disables collective metadata reads during chunk lookup, since + it is generally unlikely that the same chunks will be read by all MPI + ranks in the I/O operation. However, this was only a partial solution + that wasn't granular enough. + + This change now unifies the handling of the file-global flag and the + API context-level flag for collective metadata reads in order to + simplify querying of the true status of collective metadata reads. Thus, + collective metadata reads are once again enabled for chunked dataset + raw data I/O, but manually controlled at places where some processing + occurs on MPI rank 0 only and would cause issues when collective + metadata reads are enabled. + + (JTH - 2021/11/16, HDFFV-10501/HDFFV-10562) + + - Fixed several potential MPI deadlocks in library failure conditions + + In the parallel library, there were several places where MPI rank 0 + could end up skipping past collective MPI operations when some failure + occurs in rank 0-specific processing. This would lead to deadlocks + where rank 0 completes an operation while other ranks wait in the + collective operation. These places have been rewritten to have rank 0 + push an error and try to cleanup after the failure, then continue to + participate in the collective operation to the best of its ability. 
+ + (JTH - 2021/11/09) + + - Fixed an H5Pget_filter_by_id1/2() assert w/ out of range filter IDs + + Both H5Pget_filter_by_id1 and 2 did not range check the filter ID, which + could trip as assert in debug versions of the library. The library now + returns a normal HDF5 error when the filter ID is out of range. + + (DER - 2021/11/23, HDFFV-11286) + + - Fixed an issue with collective metadata reads being permanently disabled + after a dataset chunk lookup operation. This would usually cause a + mismatched MPI_Bcast and MPI_ERR_TRUNCATE issue in the library for + simple cases of H5Dcreate() -> H5Dwrite() -> H5Dcreate(). + + (JTH - 2021/11/08, HDFFV-11090) + + - Fixed cross platform incompatibility of references within variable length + types + + Reference types within variable length types previously could not be + read on a platform with different endianness from where they were + written. Fixed so cross platform portability is restored. + + (NAF - 2021/09/30) + + - Detection of simple data transform function "x" + + In the case of the simple data transform function "x" the (parallel) + library recognizes this is the same as not applying this data transform + function. This improves the I/O performance. In the case of the parallel + library, it also avoids breaking to independent I/O, which makes it + possible to apply a filter when writing or reading data to or from + the HDF5 file. + + (JWSB - 2021/09/13) + + - Fixed an invalid read and memory leak when parsing corrupt file space + info messages + + When the corrupt file from CVE-2020-10810 was parsed by the library, + the code that imports the version 0 file space info object header + message to the version 1 struct could read past the buffer read from + the disk, causing an invalid memory read. Not catching this error would + cause downstream errors that eventually resulted in a previously + allocated buffer to be unfreed when the library shut down. In builds + where the free lists are in use, this could result in an infinite loop + and SIGABRT when the library shuts down. + + We now track the buffer size and raise an error on attempts to read + past the end of it. + + (DER - 2021/08/12, HDFFV-11053) + + + - Fixed CVE-2018-14460 + + The tool h5repack produced a segfault when the rank in dataspace + message was corrupted, causing invalid read while decoding the + dimension sizes. + + The problem was fixed by ensuring that decoding the dimension sizes + and max values will not go beyond the end of the buffer. + + (BMR - 2021/05/12, HDFFV-11223) + + - Fixed CVE-2018-11206 + + The tool h5dump produced a segfault when the size of a fill value + message was corrupted and caused a buffer overflow. + + The problem was fixed by verifying the fill value's size + against the buffer size before attempting to access the buffer. + + (BMR - 2021/03/15, HDFFV-10480) + + - Fixed CVE-2018-14033 (same issue as CVE-2020-10811) + + The tool h5dump produced a segfault when the storage size message + was corrupted and caused a buffer overflow. + + The problem was fixed by verifying the storage size against the + buffer size before attempting to access the buffer. + + (BMR - 2021/03/15, HDFFV-11159/HDFFV-11049) + + - Remove underscores on header file guards + + Header file guards used a variety of underscores at the beginning of the define. + + Removed all leading (some trailing) underscores from header file guards. + + (ADB - 2021/03/03, #361) + + - Fixed a segmentation fault + + A segmentation fault occurred with a Mathworks corrupted file. 
+ + A detection of accessing a null pointer was added to prevent the problem. + + (BMR - 2021/02/19, HDFFV-11150) + + - Fixed issue with MPI communicator and info object not being + copied into new FAPL retrieved from H5F_get_access_plist + + Added logic to copy the MPI communicator and info object into + the output FAPL. MPI communicator is retrieved from the VFD, while + the MPI info object is retrieved from the file's original FAPL. + + (JTH - 2021/02/15, HDFFV-11109) + + - Fixed problems with vlens and refs inside compound using + H5VLget_file_type() + + Modified library to properly ref count H5VL_object_t structs and only + consider file vlen and reference types to be equal if their files are + the same. + + (NAF - 2021/01/22) + + - Fixed CVE-2018-17432 + + The tool h5repack produced a segfault on a corrupted file which had + invalid rank for scalar or NULL datatype. + + The problem was fixed by modifying the dataspace encode and decode + functions to detect and report invalid rank. h5repack now fails + with an error message for the corrupted file. + + (BMR - 2020/10/26, HDFFV-10590) + + - Creation of dataset with optional filter + + When the combination of type, space, etc doesn't work for filter + and the filter is optional, it was supposed to be skipped but it was + not skipped and the creation failed. + + Allowed the creation of the dataset in such a situation. + + (BMR - 2020/08/13, HDFFV-10933) + + - Explicitly declared dlopen to use RTLD_LOCAL + + dlopen documentation states that if neither RTLD_GLOBAL nor + RTLD_LOCAL are specified, then the default behavior is unspecified. + The default on linux is usually RTLD_LOCAL while macos will default + to RTLD_GLOBAL. + + (ADB - 2020/08/12, HDFFV-11127) + + - H5Sset_extent_none() sets the dataspace class to H5S_NO_CLASS which + causes asserts/errors when passed to other dataspace API calls. + + H5S_NO_CLASS is an internal class value that should not have been + exposed via a public API call. + + In debug builds of the library, this can cause assert() function to + trip. In non-debug builds, it will produce normal library errors. + + The new library behavior is for H5Sset_extent_none() to convert + the dataspace into one of type H5S_NULL, which is better handled + by the library and easier for developers to reason about. + + (DER - 2020/07/27, HDFFV-11027) + + - Fixed issues CVE-2018-13870 and CVE-2018-13869 + + When a buffer overflow occurred because a name length was corrupted + and became very large, h5dump crashed on memory access violation. + + A check for reading pass the end of the buffer was added to multiple + locations to prevent the crashes and h5dump now simply fails with an + error message when this error condition occurs. + + (BMR - 2020/07/22, HDFFV-11120 and HDFFV-11121) + + - Fixed the segmentation fault when reading attributes with multiple threads + + It was reported that the reading of attributes with variable length string + datatype will crash with segmentation fault particularly when the number of + threads is high (>16 threads). The problem was due to the file pointer that + was set in the variable length string datatype for the attribute. That file + pointer was already closed when the attribute was accessed. + + The problem was fixed by setting the file pointer to the current opened file pointer + when the attribute was accessed. Similar patch up was done before when reading + dataset with variable length string datatype. 
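+      For reference, a minimal single-threaded sketch of the access pattern
+      involved in this fix, i.e. reading a variable-length string attribute.
+      The file, object, and attribute names are hypothetical, the attribute is
+      assumed to use a variable-length string datatype, and error checking is
+      omitted for brevity.
+
+          #include "hdf5.h"
+
+          int main(void)
+          {
+              hid_t fid  = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
+              hid_t aid  = H5Aopen_by_name(fid, "/dset", "note", H5P_DEFAULT, H5P_DEFAULT);
+              hid_t mtid = H5Tcopy(H5T_C_S1);
+              char *value = NULL;
+
+              H5Tset_size(mtid, H5T_VARIABLE);  /* memory type: variable-length string */
+
+              H5Aread(aid, mtid, &value);       /* the library allocates the string */
+              /* ... use value ... */
+              H5free_memory(value);             /* release library-allocated memory */
+
+              H5Tclose(mtid);
+              H5Aclose(aid);
+              H5Fclose(fid);
+              return 0;
+          }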
+ + (VC - 2020/07/13, HDFFV-11080) + + - Fixed CVE-2020-10810 + + The tool h5clear produced a segfault during an error recovery in + the superblock decoding. An internal pointer was reset to prevent + further accessing when it is not assigned with a value. + + (BMR - 2020/06/29, HDFFV-11053) + + - Fixed CVE-2018-17435 + + The tool h52gif produced a segfault when the size of an attribute + message was corrupted and caused a buffer overflow. + + The problem was fixed by verifying the attribute message's size + against the buffer size before accessing the buffer. h52gif was + also fixed to display the failure instead of silently exiting + after the segfault was eliminated. + + (BMR - 2020/06/19, HDFFV-10591) + + + Java Library + ------------ + - Improve variable-length datatype handling in JNI. + + The existing JNI read-write functions could handle variable-length datatypes + that were simple variable-length datatype with an atomic sub-datatype. More + complex combinations could not be handled. Reworked the JNI read-write functions + to recursively inspect datatypes for variable-length sub-datatypes. + + (ADB - 2022/10/12, HDFFV-8701,10375) + + - JNI utility function does not handle new references. + + The JNI utility function for converting reference data to string did + not use the new APIs. In addition to fixing that function, added new + java tests for using the new APIs. + + (ADB - 2021/02/16, HDFFV-11212) + + - The H5FArray.java class, in which virtually the entire execution time + is spent using the HDFNativeData method that converts from an array + of bytes to an array of the destination Java type. + + 1. Convert the entire byte array into a 1-d array of the desired type, + rather than performing 1 conversion per row; + 2. Use the Java Arrays method copyOfRange to grab the section of the + array from (1) that is desired to be inserted into the destination array. + + (PGT,ADB - 2020/12/13, HDFFV-10865) + + + Configuration + ------------- + - Remove Javadoc generation + + The use of doxygen now supersedes the requirement to build javadocs. We do not + have the resources to continue to support two documentation methods and have + chosen doxygen as our standard. + + (ADB - 2022/12/19) + + - Change the default for building the high-level GIF tools + + The gif2h5 and h52gif high-level tools are deprecated and will be removed + in a future release. The default build setting for them has been changed + from enabled to disabled. A user can enable the build of these tools if + needed. + + autotools: --enable-hlgiftools + cmake: HDF5_BUILD_HL_GIF_TOOLS=ON + + Disabling the GIF tools eliminates the following CVEs: + + HDFFV-10592 CVE-2018-17433 + HDFFV-10593 CVE-2018-17436 + HDFFV-11048 CVE-2020-10809 + + (ADB - 2022/12/16) + + - Change the settings of the *pc files to use the correct format + + The pkg-config files generated by CMake uses incorrect syntax for the 'Requires' + settings. Changing the set to use 'lib-name = version' instead 'lib-name-version' + fixes the issue + + (ADB - 2022/12/06 HDFFV-11355) + + - Move MPI libraries link from PRIVATE to PUBLIC + + The install dependencies were not including the need for MPI libraries when + an application or library was built with the C library. Also updated the + CMake target link command to use the newer style MPI::MPI_C link variable. + + (ADB - 2022/10/27) + + - Corrected path searched by CMake find_package command + + The install path for cmake find_package files had been changed to use + "share/cmake" + for all platforms. 
However, the trailing "hdf5" directory was not removed.
+      This additional "hdf5" directory has now been removed.
+
+      (ADB - 2021/09/27)
+
+    - Corrected pkg-config compile script
+
+      It was discovered that the position of the "$@" argument for the command
+      in the compile script may fail on some platforms and configurations. The
+      position of the "$@" command argument was moved before the pkg-config subcommand.
+
+      (ADB - 2021/08/30)
+
+    - Fixed CMake C++ compiler flags
+
+      A recent refactoring of the C++ configure files accidentally removed the
+      file that executed the enable_language command for C++ needed by the
+      HDFCXXCompilerFlags.cmake file. Also updated the Intel warnings files,
+      including adding support for Windows platforms.
+
+      (ADB - 2021/08/10)
+
+    - Better support for libaec (open-source Szip library) in CMake
+
+      Implemented better support for the libaec 1.0.5 (or later) library. This version
+      of libaec contains improvements for better integration with HDF5. Furthermore,
+      the variable USE_LIBAEC_STATIC has been introduced to allow use of a
+      static version of the libaec library. Use libaec_DIR or libaec_ROOT to set
+      the location in which libaec can be found.
+
+      Be aware that the Szip library of libaec 1.0.4 depends on another library within
+      the libaec library. This dependency is not specified in the current CMake
+      configuration, which means that one cannot use the static Szip library of
+      libaec 1.0.4 when building HDF5. This has been resolved in libaec 1.0.5.
+
+      (JWSB - 2021/06/22)
+
+    - Refactor CMake configure for Fortran
+
+      The Fortran configure tests for KINDs reused a single output file that was
+      read to form the Integer and Real Kinds defines. However, if configure was run
+      more than once, the CMake "completed" variable prevented the tests from executing
+      again and the last value saved in the file was used to create the define.
+      Creating separate files for each KIND solved the issue.
+
+      In addition, the test for H5_PAC_C_MAX_REAL_PRECISION was not pulling in
+      the defines needed for proper operation and did not define H5_PAC_C_MAX_REAL_PRECISION
+      correctly for a zero value. This was fixed by supplying the required defines.
+      In addition, it was moved from the Fortran-specific HDF5UseFortran.cmake file
+      to the C-centric ConfigureChecks.cmake file.
+
+      (ADB - 2021/06/03)
+
+    - Move emscripten flag to compile flags
+
+      The emscripten flag, -O0, was moved from the target_link_libraries command
+      to the correct target_compile_options command.
+
+      (ADB - 2021/04/26 HDFFV-11083)
+
+    - Remove arbitrary warning flag groups from CMake builds
+
+      The arbitrary groups were created to reduce the quantity of warnings being
+      reported that overwhelmed testing report systems. Considerable work has
+      been accomplished to reduce the warning count and these arbitrary groups
+      are no longer needed.
+      Also, the default for all warnings, HDF5_ENABLE_ALL_WARNINGS, is now ON.
+
+      Visual Studio warnings C4100, C4706, and C4127 have been moved to
+      developer warnings, HDF5_ENABLE_DEV_WARNINGS, and are disabled for normal builds.
+
+      (ADB - 2021/03/22, HDFFV-11228)
+
+    - Reclassify CMake messages to allow new modes and the --log-level option
+
+      CMake message commands have a mode argument. By default, STATUS mode
+      was chosen for any non-error message. CMake version 3.15 added additional
+      modes: NOTICE, VERBOSE, DEBUG and TRACE. All message commands with a mode
+      of STATUS were reviewed and most were reclassified as VERBOSE. The new
+      mode was protected by a check for a CMake version of at least 3.15.
If CMake + version 3.17 or above is used, the user can use the command line option + of "--log-level" to further restrict which message commands are displayed. + + (ADB - 2021/01/11, HDFFV-11144) + + - Fixes Autotools determination of the stat struct having an st_blocks field + + A missing parenthesis in an autoconf macro prevented building the test + code used to determine if the stat struct contains the st_blocks field. + Now that the test functions correctly, the H5_HAVE_STAT_ST_BLOCKS #define + found in H5pubconf.h will be defined correctly on both the Autotools and + CMake. This #define is only used in the tests and does not affect the + HDF5 C library. + + (DER - 2021/01/07, HDFFV-11201) + + - Add missing ENV variable line to hdfoptions.cmake file + + Using the build options to use system SZIP/ZLIB libraries need to also + specify the library root directory. Setting the {library}_ROOT ENV + variable was added to the hdfoptions.cmake file. + + (ADB - 2020/10/19 HDFFV-11108) + + + Tools + ----- + - Fix h5repack to only print output when verbose option is selected + + When timing option was added to h5repack, the check for verbose was + incorrectly implemented. + + (ADB - 2022/12/02, GH #2270) + + - Changed how h5dump and h5ls identify long double. + + Long double support is not consistent across platforms. Tools will always + identify long double as 128-bit [little/big]-endian float nn-bit precision. + New test file created for datasets with attributes for float, double and + long double. In addition any unknown integer or float datatype will now + also show the number of bits for precision. + These files are also used in the java tests. + + (ADB - 2021/03/24, HDFFV-11229,HDFFV-11113) + + - Fixed tools argument parsing. + + Tools parsing used the length of the option from the long array to match + the option from the command line. This incorrectly matched a shorter long + name option that happened to be a subset of another long option. + Changed to match whole names. + + (ADB - 2021/01/19, HDFFV-11106) + + - The tools library was updated by standardizing the error stack process. + + General sequence is: + h5tools_setprogname(PROGRAMNAME); + h5tools_setstatus(EXIT_SUCCESS); + h5tools_init(); + ... process the command-line (check for error-stack enable) ... + h5tools_error_report(); + ... (do work) ... + h5diff_exit(ret); + + (ADB - 2020/07/20, HDFFV-11066) + + - h5diff fixed a command line parsing error. + + h5diff would ignore the argument to -d (delta) if it is smaller than DBL_EPSILON. + The macro H5_DBL_ABS_EQUAL was removed and a direct value comparison was used. + + (ADB - 2020/07/20, HDFFV-10897) + + - h5diff added a command line option to ignore attributes. + + h5diff would ignore all objects with a supplied path if the exclude-path argument is used. + Adding the exclude-attribute argument will only exclude attributes, with the supplied path, + from comparison. + + (ADB - 2020/07/20, HDFFV-5935) + + - h5diff added another level to the verbose argument to print filenames. + + Added verbose level 3 that is level 2 plus the filenames. The levels are: + 0 : Identical to '-v' or '--verbose' + 1 : All level 0 information plus one-line attribute status summary + 2 : All level 1 information plus extended attribute status report + 3 : All level 2 information plus file names + + (ADB - 2020/07/20, HDFFV-1005) + + + Performance + ------------- + - + + + Fortran API + ----------- + - h5open_f and h5close_f fixes + * Fixed it so both h5open_f and h5close_f can be called multiple times. 
+ * Fixed an issue with open objects remaining after h5close_f was called. + * Added additional tests. + (MSB, 2022/04/19, HDFFV-11306) + + + High-Level Library + ------------------ + - Fixed HL_test_packet, test for packet table vlen of vlen. + + Incorrect length assignment. + + (ADB - 2021/10/14) + + + Fortran High-Level APIs + ----------------------- + - + + + Documentation + ------------- + - + + + F90 APIs + -------- + - + + + C++ APIs + -------- + - Added DataSet::operator= + + Some compilers complain if the copy constructor is given explicitly + but the assignment operator is implicitly set to default. + + (2021/05/19) + + + Testing + ------- + - Stopped java/test/junit.sh.in installing libs for testing under ${prefix} + + Lib files needed are now copied to a subdirectory in the java/test + directory, and on Macs the loader path for libhdf5.xxxs.so is changed + in the temporary copy of libhdf5_java.dylib. + + (LRK, 2020/07/02, HDFFV-11063) + + +Platforms Tested +=================== + + Linux 5.16.14-200.fc35 GNU gcc (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) + Fedora35 clang version 13.0.0 (Fedora 13.0.0-3.fc35) + (cmake and autotools) + + Linux 5.15.0-1026-aws gcc (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0 + #30-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0 + Ubuntu 22.04 Ubuntu clang version 14.0.0-1ubuntu1 + (cmake and autotools) + + Linux 5.13.0-1031-aws GNU gcc (GCC) 9.4.0-1ubuntu1 + #35-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 9.4.0-1ubuntu1 + Ubuntu 20.04 clang version 10.0.0-4ubuntu1 + (cmake and autotools) + + Linux 5.3.18-150300-cray_shasta_c cray-mpich/8.3.3 + #1 SMP x86_64 GNU/Linux Cray clang 14.0.2, 15.0.0 + (crusher) GCC 11.2.0, 12.1.0 + (cmake) + + Linux 4.18.0-348.7.1.el8_5 gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-4) + #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 8.5.0 20210514 (Red Hat 8.5.0-4) + CentOS8 clang version 12.0.1 (Red Hat 12.0.1) + (cmake and autotools) + + Linux 4.14.0-115.35.1.1chaos openmpi 4.0.5 + #1 SMP aarch64 GNU/Linux GCC 9.3.0 (ARM-build-5) + (stria) GCC 7.2.0 (Spack GCC) + arm/20.1 + arm/22.1 + (cmake) + + Linux 4.14.0-115.35.1.3chaos spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1 + (vortex) GCC 8.3.1 + XL 16.1.1 + (cmake) + + Linux-4.14.0-115.21.2 spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 12.0.1, 14.0.5 + (lassen) GCC 8.3.1 + XL 16.1.1.2, 2021,09.22, 2022.08.05 + (cmake) + + Linux-4.12.14-197.99-default cray-mpich/7.7.14 + #1 SMP x86_64 GNU/Linux cce 12.0.3 + (theta) GCC 11.2.0 + llvm 9.0 + Intel 19.1.2 + + Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) + + Linux 3.10.0-1160.24.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4) + (jelly/kituo/moohan) Version 4.9.3, Version 5.3.0, Version 6.3.0, + Version 7.2.0, Version 8.3.0, Version 9.1.0 + Intel(R) C (icc), C++ (icpc), Fortran (icc) + compilers: + Version 17.0.0.098 Build 20160721 + GNU C (gcc) and C++ (g++) 4.8.5 compilers + with NAG Fortran Compiler Release 6.1(Tozai) + Intel(R) C (icc) and C++ (icpc) 17.0.0.098 compilers + with NAG Fortran Compiler Release 6.1(Tozai) + MPICH 3.1.4 compiled with GCC 4.9.3 + MPICH 3.3 compiled with GCC 7.2.0 + OpenMPI 2.1.6 compiled with icc 18.0.1 + OpenMPI 
3.1.3 and 4.0.0 compiled with GCC 7.2.0 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + (autotools and cmake) + + Linux-3.10.0-1160.0.0.1chaos openmpi-4.1.2 + #1 SMP x86_64 GNU/Linux clang 6.0.0, 11.0.1 + (quartz) GCC 7.3.0, 8.1.0 + Intel 19.0.4, 2022.2, oneapi.2022.2 + + Linux-3.10.0-1160.71.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (skybridge) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.66.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux GCC 7.2.0 + (attaway) Intel/19.1 + (cmake) + + Linux-3.10.0-1160.59.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux Intel/19.1 + (chama) (cmake) + + macOS Apple M1 11.6 Apple clang version 12.0.5 (clang-1205.0.22.11) + Darwin 20.6.0 arm64 gfortran GNU Fortran (Homebrew GCC 11.2.0) 11.1.0 + (macmini-m1) Intel icc/icpc/ifort version 2021.3.0 202106092021.3.0 20210609 + + macOS Big Sur 11.3.1 Apple clang version 12.0.5 (clang-1205.0.22.9) + Darwin 20.4.0 x86_64 gfortran GNU Fortran (Homebrew GCC 10.2.0_3) 10.2.0 + (bigsur-1) Intel icc/icpc/ifort version 2021.2.0 20210228 + + macOS High Sierra 10.13.6 Apple LLVM version 10.0.0 (clang-1000.10.44.4) + 64-bit gfortran GNU Fortran (GCC) 6.3.0 + (bear) Intel icc/icpc/ifort version 19.0.4.233 20190416 + + macOS Sierra 10.12.6 Apple LLVM version 9.0.0 (clang-900.39.2) + 64-bit gfortran GNU Fortran (GCC) 7.4.0 + (kite) Intel icc/icpc/ifort version 17.0.2 + + Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3 + 64-bit gfortran GNU Fortran (GCC) 5.2.0 + (osx1011test) Intel icc/icpc/ifort version 16.0.2 + + + Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) + #1 SMP x86_64 GNU/Linux compilers: + Centos6 Version 4.4.7 20120313 + (platypus) Version 4.9.3, 5.3.0, 6.2.0 + MPICH 3.1.4 compiled with GCC 4.9.3 + PGI C, Fortran, C++ for 64-bit target on + x86_64; + Version 19.10-0 + + Windows 10 x64 Visual Studio 2015 w/ Intel C/C++/Fortran 18 (cmake) + Visual Studio 2017 w/ Intel C/C++/Fortran 19 (cmake) + Visual Studio 2019 w/ clang 12.0.0 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2019 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2022 w/ clang 15.0.1 + with MSVC-like command-line (C/C++ only - cmake) + Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) + + +Known Problems +============== + + ************************************************************ + * _ * + * (_) * + * __ ____ _ _ __ _ __ _ _ __ __ _ * + * \ \ /\ / / _` | '__| '_ \| | '_ \ / _` | * + * \ V V / (_| | | | | | | | | | | (_| | * + * \_/\_/ \__,_|_| |_| |_|_|_| |_|\__, | * + * __/ | * + * |___/ * + * * + * Please refrain from running any program (including * + * HDF5 tests) which uses the subfiling VFD on Perlmutter * + * at the National Energy Research Scientific Computing * + * Center, NERSC. * + * Doing so may cause a system disruption due to subfiling * + * crashing Lustre. The system's Lustre bug is expected * + * to be resolved by 2023. * + * * + ************************************************************ + + There is a bug in OpenMPI 4.1.0-4.1.4 that can result in incorrect + results from MPI I/O requests unless one of the following parameters + is passed to mpirun: + + --mca io ^ompio + + --mca fbtl_posix_read_data_sieving 0 + + This bug has been fixed in later versions of OpenMPI. 
+ + Further discussion can be found here: + + https://www.hdfgroup.org/2022/11/workarounds-for-openmpi-bug-exposed-by-make-check-in-hdf5-1-13-3/ + + When using the subfiling feature with OpenMPI it is often necessary to + increase the maximum number of threads: + + --mca common_pami_max_threads 4096 + + There is a bug in MPICH 4.0.0-4.0.3 where using device=ch4:ofi (the default) + can cause failures in the testphdf5 test program. Using ch4:ucx or ch3 + allows the test to pass. The bug appears to be fixed in the upcoming 4.1 + release. + + These MPI implementation bugs may also be present in implementations derived + from OpenMPI or MPICH. The workarounds listed above may need to be adjusted + to match the derived implementation, or in some cases, there may be no + workaround. + + The accum test fails on MacOS 12.6.2 (Monterey) with clang 14.0.0. The + reason for this failure and its impact are unknown. + + The onion test has failures on Windows when built using Intel OneAPI + 2022.3. The cause of these failures is under investigation. + + CMake files do not behave correctly with paths containing spaces. + Do not use spaces in paths because the required escaping for handling spaces + results in very complex and fragile build files. + ADB - 2019/05/07 + + At present, metadata cache images may not be generated by parallel + applications. Parallel applications can read files with metadata cache + images, but since this is a collective operation, a deadlock is possible + if one or more processes do not participate. + + CPP ptable test fails on both VS2017 and VS2019 with Intel compiler, JIRA + issue: HDFFV-10628. This test will pass with VS2015 with Intel compiler. + + The subsetting option in ph5diff currently will fail and should be avoided. + The subsetting option works correctly in serial h5diff. + + Several tests currently fail on certain platforms: + MPI_TEST-t_bigio fails with spectrum-mpi on ppc64le platforms. + + MPI_TEST-t_subfiling_vfd and MPI_TEST_EXAMPLES-ph5_subfiling fail with + cray-mpich on theta and with XL compilers on ppc64le platforms. + + MPI_TEST_testphdf5_tldsc fails with cray-mpich 7.7 on cori and theta. + + Known problems in previous releases can be found in the HISTORY*.txt files + in the HDF5 source. Please report any new problems found to + help@hdfgroup.org. + + +CMake vs. Autotools installations +================================= +While both build systems produce similar results, there are differences. +Each system produces the same set of folders on linux (only CMake works +on standard Windows); bin, include, lib and share. Autotools places the +COPYING and RELEASE.txt file in the root folder, CMake places them in +the share folder. + +The bin folder contains the tools and the build scripts. Additionally, CMake +creates dynamic versions of the tools with the suffix "-shared". Autotools +installs one set of tools depending on the "--enable-shared" configuration +option. + build scripts + ------------- + Autotools: h5c++, h5cc, h5fc + CMake: h5c++, h5cc, h5hlc++, h5hlcc + +The include folder holds the header files and the fortran mod files. CMake +places the fortran mod files into separate shared and static subfolders, +while Autotools places one set of mod files into the include folder. Because +CMake produces a tools library, the header files for tools will appear in +the include folder. + +The lib folder contains the library files, and CMake adds the pkgconfig +subfolder with the hdf5*.pc files used by the bin/build scripts created by +the CMake build. 
CMake separates the C interface code from the fortran code by +creating C-stub libraries for each Fortran library. In addition, only CMake +installs the tools library. The names of the szip libraries are different +between the build systems. + +The share folder will have the most differences because CMake builds include +a number of CMake specific files for support of CMake's find_package and support +for the HDF5 Examples CMake project. + +The issues with the gif tool are: + HDFFV-10592 CVE-2018-17433 + HDFFV-10593 CVE-2018-17436 + HDFFV-11048 CVE-2020-10809 +These CVE issues have not yet been addressed and are avoided by not building +the gif tool by default. Enable building the High-Level tools with these options: + autotools: --enable-hltools + cmake: HDF5_BUILD_HL_TOOLS=ON diff --git a/release_docs/INSTALL_Autotools.txt b/release_docs/INSTALL_Autotools.txt index dc394be4521..325090aadd4 100644 --- a/release_docs/INSTALL_Autotools.txt +++ b/release_docs/INSTALL_Autotools.txt @@ -27,10 +27,10 @@ Obtaining HDF5 source code 2. Obtain HDF5 source from Github development branch: https://github.com/HDFGroup/hdf5 last release: https://github.com/HDFGroup/hdf5/releases/latest - hdf5-1_14_"X".tar.gz or hdf5-1_14_"X".zip + hdf5-1_17_"X".tar.gz or hdf5-1_17_"X".zip and put it in "myhdfstuff". - Uncompress the file. There should be a hdf5-1.14."X" folder. + Uncompress the file. There should be a hdf5-1.17."X" folder. ======================================================================== @@ -345,10 +345,12 @@ III. Full installation instructions for source distributions 3.11. Backward compatibility - The 1.15 version of the HDF5 library can be configured to operate - identically to the v1.14 library with the + The 1.17 version of the HDF5 library can be configured to operate + identically to the v1.16 library with the + --with-default-api-version=v116 + configure flag, or identically to the v1.14 library with the --with-default-api-version=v114 - identically to the v1.12 library with the + configure flag, or identically to the v1.12 library with the --with-default-api-version=v112 configure flag, or identically to the v1.10 library with the --with-default-api-version=v110 @@ -357,7 +359,7 @@ III. Full installation instructions for source distributions configure flag, or identically to the v1.6 library with the --with-default-api-version=v16 configure flag. This allows existing code to be compiled with the - v1.15 library without requiring immediate changes to the application + v1.17 library without requiring immediate changes to the application source code. For additional configuration options and other details, see "API Compatibility Macros": diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index b2bd84c20f3..6db1b29cc6f 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -32,11 +32,11 @@ Obtaining HDF5 source code hdf5-1_15_"X".tar.gz or hdf5-1_15_"X".zip and put it in "myhdfstuff". - Uncompress the file. There should be a hdf5-1.15."X" folder. + Uncompress the file. There should be a hdf5-1.17."X" folder. CMake version 1. We suggest you obtain the latest CMake from the Kitware web site. - The HDF5 1.15."X" product requires a minimum CMake version 3.18, + The HDF5 1.17."X" product requires a minimum CMake version 3.18, where "X" is the current HDF5 release version. If you are using VS2022, the minimum version is 3.21. @@ -85,7 +85,7 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: 2. 
Download/copy the individual files mentioned above to "myhdfstuff". Do not uncompress the tar.gz files. - 3. Change to the source directory "hdf5-1.15.x". + 3. Change to the source directory "hdf5-1.17.x". CTestScript.cmake file should not be modified. 4. Edit the platform configuration file, HDF5options.cmake, if you want to change @@ -113,7 +113,7 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: The command above will configure, build, test, and create an install package in the myhdfstuff folder. It will have the format: - HDF5-1.15.NN-. + HDF5-1.17.NN-. On Unix, will be "Linux". A similar .sh file will also be created. On Windows, will be "win64" or "win32". If you have an @@ -134,13 +134,13 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: 6. To install, "X" is the current release version On Windows (with WiX installed), execute: - HDF5-1.15."X"-win32.msi or HDF5-1.15."X"-win64.msi + HDF5-1.17."X"-win32.msi or HDF5-1.17."X"-win64.msi By default this program will install the hdf5 library into the "C:\Program Files" directory and will create the following directory structure: HDF_Group --HDF5 - ----1.15."X" + ----1.17."X" ------bin ------include ------lib @@ -149,29 +149,29 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: On Linux, change to the install destination directory (create it if doesn't exist) and execute: - /myhdfstuff/HDF5-1.15."X"-Linux.sh + /myhdfstuff/HDF5-1.17."X"-Linux.sh After accepting the license, the script will prompt: By default the HDF5 will be installed in: - "/HDF5-1.15."X"-Linux" - Do you want to include the subdirectory HDF5-1.15."X"-Linux? + "/HDF5-1.17."X"-Linux" + Do you want to include the subdirectory HDF5-1.17."X"-Linux? Saying no will install in: "" [Yn]: Note that the script will create the following directory structure relative to the install point: HDF_Group --HDF5 - ----1.15."X" + ----1.17."X" ------bin ------include ------lib --------plugins ------share - On Mac you will find HDF5-1.15."X"-Darwin.dmg in the myhdfstuff folder. Click + On Mac you will find HDF5-1.17."X"-Darwin.dmg in the myhdfstuff folder. Click on the dmg file to proceed with installation. After accepting the license, there will be a folder with the following structure: HDF_Group --HDF5 - ----1.15."X" + ----1.17."X" ------bin ------include ------lib @@ -179,12 +179,12 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: ------share By default the installation will create the bin, include, lib and cmake - folders in the /HDF_Group/HDF5/1.15."X" + folders in the /HDF_Group/HDF5/1.17."X" The depends on the build platform; Windows will set the default to: - C:/Program Files/HDF_Group/HDF5/1.15."X" + C:/Program Files/HDF_Group/HDF5/1.17."X" Linux will set the default to: - "myhdfstuff/HDF_Group/HDF5/1.15."X" + "myhdfstuff/HDF_Group/HDF5/1.17."X" The default can be changed by adding ",INSTALLDIR=" to the "ctest -S HDF5config.cmake..." command. For example on linux: ctest -S HDF5config.cmake,INSTALLDIR=/usr/local/myhdf5,BUILD_GENERATOR=Unix -C Release -VV -O hdf5.log @@ -211,13 +211,13 @@ Notes: This short set of instructions is written for users who want to 5. 
Configure the C library, tools and tests with one of the following commands: On Windows 32 bit - cmake -G "Visual Studio 16 2019" -A Win32 -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.15."X" + cmake -G "Visual Studio 16 2019" -A Win32 -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.17."X" On Windows 64 bit - cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.15."X" + cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ..\hdf5-1.17."X" On Linux and Mac - cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ../hdf5-1.15."X" + cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS:BOOL=OFF -DBUILD_TESTING:BOOL=ON -DHDF5_BUILD_TOOLS:BOOL=ON ../hdf5-1.17."X" where "X" is the current release version. @@ -232,13 +232,13 @@ Notes: This short set of instructions is written for users who want to 9. To install On Windows (with WiX installed), execute: - HDF5-1.15."X"-win32.msi or HDF5-1.15."X"-win64.msi + HDF5-1.17."X"-win32.msi or HDF5-1.17."X"-win64.msi By default this program will install the hdf5 library into the "C:\Program Files" directory and will create the following directory structure: HDF_Group --HDF5 - ----1.15."X" + ----1.17."X" ------bin ------include ------lib @@ -247,29 +247,29 @@ Notes: This short set of instructions is written for users who want to On Linux, change to the install destination directory (create if doesn't exist) and execute: - /myhdfstuff/build/HDF5-1.15."X"-Linux.sh + /myhdfstuff/build/HDF5-1.17."X"-Linux.sh After accepting the license, the script will prompt: By default the HDF5 will be installed in: - "/HDF5-1.15."X"-Linux" - Do you want to include the subdirectory HDF5-1.15."X"-Linux? + "/HDF5-1.17."X"-Linux" + Do you want to include the subdirectory HDF5-1.17."X"-Linux? Saying no will install in: "" [Yn]: Note that the script will create the following directory structure relative to the install point: HDF_Group --HDF5 - ----1.15."X" + ----1.17."X" ------bin ------include ------lib --------plugins ------share - On Mac you will find HDF5-1.15."X"-Darwin.dmg in the build folder. Click + On Mac you will find HDF5-1.17."X"-Darwin.dmg in the build folder. Click on the dmg file to proceed with installation. After accepting the license, there will be a folder with the following structure: HDF_Group --HDF5 - ----1.15."X" + ----1.17."X" ------bin ------include ------lib @@ -282,7 +282,7 @@ IV. Further considerations ======================================================================== 1. We suggest you obtain the latest CMake for windows from the Kitware - web site. The HDF5 1.15."X" product requires a minimum CMake version 3.18. + web site. The HDF5 1.17."X" product requires a minimum CMake version 3.18. If you are using VS2022, the CMake minimum version is 3.21. 2. 
If you plan to use Zlib or Szip: @@ -888,7 +888,7 @@ HDF5_STRICT_FORMAT_CHECKS "Whether to perform strict file format checks" HDF5_WANT_DATA_ACCURACY "IF data accuracy is guaranteed during data conversions" ON HDF5_WANT_DCONV_EXCEPTION "exception handling functions is checked during data conversions" ON -DEFAULT_API_VERSION "Enable default API (v16, v18, v110, v112, v114, v116)" "v116" +DEFAULT_API_VERSION "Enable default API (v16, v18, v110, v112, v114, v116, v118)" "v118" HDF5_USE_FOLDERS "Enable folder grouping of projects in IDEs." ON HDF5_MSVC_NAMING_CONVENTION "Use MSVC Naming conventions for Shared Libraries" OFF HDF5_MINGW_STATIC_GCC_LIBS "Statically link libgcc/libstdc++" OFF diff --git a/release_docs/INSTALL_Cygwin.txt b/release_docs/INSTALL_Cygwin.txt index 3613cc8be71..0c61f95ce48 100644 --- a/release_docs/INSTALL_Cygwin.txt +++ b/release_docs/INSTALL_Cygwin.txt @@ -94,19 +94,19 @@ Build, Test and Install HDF5 on Cygwin The HDF5 source code is distributed in a variety of formats which can be unpacked with the following commands, each of which creates - an `hdf5-1.15.x' directory. + an `hdf5-1.17.x' directory. 2.1 Non-compressed tar archive (*.tar) - $ tar xf hdf5-1.15.x.tar + $ tar xf hdf5-1.17.x.tar 2.2 Gzip'd tar archive (*.tar.gz) - $ gunzip < hdf5-1.15.x.tar.gz | tar xf - + $ gunzip < hdf5-1.17.x.tar.gz | tar xf - 2.3 Bzip'd tar archive (*.tar.bz2) - $ bunzip2 < hdf5-1.15.x.tar.bz2 | tar xf - + $ bunzip2 < hdf5-1.17.x.tar.bz2 | tar xf - 2. Setup Environment diff --git a/release_docs/NEWSLETTER.txt b/release_docs/NEWSLETTER.txt index 3884718c6ed..9e06758f16d 100644 --- a/release_docs/NEWSLETTER.txt +++ b/release_docs/NEWSLETTER.txt @@ -1,4 +1,4 @@ -HDF5 version 1.15.0 currently under development +HDF5 version 1.17.0 currently under development Features included for the next major release: ---------------------------------------------------------------------------- diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 8d5615fcc11..f700c8a266a 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -1,4 +1,4 @@ -HDF5 version 1.15.0 currently under development +HDF5 version 1.17.0 currently under development ================================================================================ @@ -36,7 +36,7 @@ CONTENTS - New Features - Support for new platforms and languages -- Bug Fixes since HDF5-1.14.0 +- Bug Fixes since HDF5-1.16.0 - Platforms Tested - Known Problems - CMake vs. Autotools installations @@ -47,12 +47,6 @@ New Features Configuration: ------------- - - Added signed Windows msi binary and signed Apple dmg binary files. - - The release process now provides signed Windows and Apple installation - binaries in addition to the debian and rpm installation binaries. Also - these installer files are no longer compressed into packaged archives. - - Added configuration option for internal threading/concurrency support: CMake: HDF5_ENABLE_THREADS (ON/OFF) (Default: ON) @@ -65,399 +59,9 @@ New Features disable the 'threadsafe' option, but not vice versa. The 'threads' option must be on to enable the subfiling VFD. - - Moved examples to the HDF5Examples folder in the source tree. - - Moved the C++ and Fortran examples from the examples folder to the HDF5Examples - folder and renamed to TUTR, tutorial. This is referenced from the LearnBasics - doxygen page. 
- - - Added support for using zlib-ng package as the zlib library: - - CMake: HDF5_USE_ZLIB_NG - Autotools: --enable-zlibng - - Added the option HDF5_USE_ZLIB_NG to allow the replacement of the - default ZLib package by the zlib-ng package as a built-in compression library. - - - Disable CMake UNITY_BUILD for hdf5 - - CMake added a target property, UNITY_BUILD, that when set to true, the target - source files will be combined into batches for faster compilation. By default, - the setting is OFF, but could be enabled by a project that includes HDF5 as a subproject. - - HDF5 has disabled this feature by setting the property to OFF in the HDFMacros.cmake file. - - - Removed "function/code stack" debugging configuration option: - - CMake: HDF5_ENABLE_CODESTACK - Autotools: --enable-codestack - - This was used to debug memory leaks internal to the library, but has been - broken for >1.5 years and is now easily replaced with third-party tools - (e.g. libbacktrace: https://github.com/ianlancetaylor/libbacktrace) on an - as-needed basis when debugging an issue. - - - Added configure options for enabling/disabling non-standard programming - language features - - * Added a new configuration option that allows enabling or disabling of - support for features that are extensions to programming languages, such - as support for the _Float16 datatype: - - CMake: HDF5_ENABLE_NONSTANDARD_FEATURES (ON/OFF) (Default: ON) - Autotools: --enable-nonstandard-features (yes/no) (Default: yes) - - When this option is enabled, configure time checks are still performed - to ensure that a feature can be used properly, but these checks may not - be sufficient when compiler support for a feature is incomplete or broken, - resulting in library build failures. When set to OFF/no, this option - provides a way to disable support for all non-standard features to avoid - these issues. Individual features can still be re-enabled with their - respective configuration options. - - * Added a new configuration option that allows enabling or disabling of - support for the _Float16 C datatype: - - CMake: HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16 (ON/OFF) (Default: ON) - Autotools: --enable-nonstandard-feature-float16 (yes/no) (Default: yes) - - While support for the _Float16 C datatype can generally be detected and - used properly, some compilers have incomplete support for the datatype - and will pass configure time checks while still failing to build HDF5. - This option provides a way to disable support for the _Float16 datatype - when the compiler doesn't have the proper support for it. - - - Deprecate bin/cmakehdf5 script - - With the improvements made in CMake since version 3.23 and the addition - of CMake preset files, this script is no longer necessary. - - See INSTALL_CMake.txt file, Section X: Using CMakePresets.json for compiling - - - Overhauled LFS support checks - - In 2024, we can assume that Large File Support (LFS) exists on all - systems we support, though it may require flags to enable it, - particularly when building 32-bit binaries. The HDF5 source does - not use any of the 64-bit specific API calls (e.g., ftello64) - or explicit 64-bit offsets via off64_t. - - Autotools - - * We now use AC_SYS_LARGEFILE to determine how to support LFS. We - previously used a custom m4 script for this. 
- - CMake - - * The HDF_ENABLE_LARGE_FILE option (advanced) has been removed - * We no longer run a test program to determine if LFS works, which - will help with cross-compiling - * On Linux we now unilaterally set -D_LARGEFILE_SOURCE and - -D_FILE_OFFSET_BITS=64, regardless of 32/64 bit system. CMake - doesn't offer a nice equivalent to AC_SYS_LARGEFILE and since - those options do nothing on 64-bit systems, this seems safe and - covers all our bases. We don't set -D_LARGEFILE64_SOURCE since - we don't use any of the POSIX 64-bit specific API calls like - ftello64, as noted above. - * We didn't test for LFS support on non-Linux platforms. We've added - comments for how LFS should probably be supported on AIX and Solaris, - which seem to be alive, though uncommon. PRs would be appreciated if - anyone wishes to test this. - - This overhaul also fixes GitHub #2395, which points out that the LFS flags - used when building with CMake differ based on whether CMake has been - run before. The LFS check program that caused this problem no longer exists. - - - The CMake HDF5_ENABLE_DEBUG_H5B option has been removed - - This enabled some additional version-1 B-tree checks. These have been - removed so the option is no longer necessary. - - This option was CMake-only and marked as advanced. - - - New option for building with static CRT in Windows - - The following option has been added: - HDF5_BUILD_STATIC_CRT_LIBS "Build With Static Windows CRT Libraries" OFF - Because our minimum CMake is 3.18, the macro to change runtime flags no longer - works as CMake changed the default behavior in CMake 3.15. - - Fixes GitHub issue #3984 - - - Added support for the new MSVC preprocessor - - Microsoft added support for a new, standards-conformant preprocessor - to MSVC, which can be enabled with the /Zc:preprocessor option. This - preprocessor would trip over our HDopen() variadic function-like - macro, which uses a feature that only works with the legacy preprocessor. - - ifdefs have been added that select the correct HDopen() form and - allow building HDF5 with the /Zc:preprocessor option. - - The HDopen() macro is located in an internal header file and only - affects building the HDF5 library from source. - - Fixes GitHub #2515 - - - Renamed HDF5_ENABLE_USING_MEMCHECKER to HDF5_USING_ANALYSIS_TOOL - - The HDF5_USING_ANALYSIS_TOOL is used to indicate to test macros that - an analysis tool is being used and that the tests should not use - the runTest.cmake macros and it's variations. The analysis tools, - like valgrind, test the macro code instead of the program under test. - - HDF5_ENABLE_USING_MEMCHECKER is still used for controlling the HDF5 - define, H5_USING_MEMCHECKER. - - - New option for building and naming tools in CMake - - The following option has been added: - HDF5_BUILD_STATIC_TOOLS "Build Static Tools Not Shared Tools" OFF - - The default will build shared tools unless BUILD_SHARED_LIBS = OFF. - Tools will no longer have "-shared" as only one set of tools will be created. - - - Incorporated HDF5 examples repository into HDF5 library. - - The HDF5Examples folder is equivalent to the hdf5-examples repository. - This enables building and testing the examples - during the library build process or after the library has been installed. - Previously, the hdf5-examples archives were downloaded - for packaging with the library. Now the examples can be built - and tested without a packaged install of the library. 
- - However, to maintain the ability to use the HDF5Examples with an installed - library, it is necessary to map the option names used by the library - to those used by the examples. The typical pattern is: - = - HDF_BUILD_FORTRAN = ${HDF5_BUILD_FORTRAN} - - - Added new option for CMake to mark tests as SKIPPED. - - HDF5_DISABLE_TESTS_REGEX is a REGEX string that will be checked with - test names and if there is a match then that test's property will be - set to DISABLED. HDF5_DISABLE_TESTS_REGEX can be initialized on the - command line: "-DHDF5_DISABLE_TESTS_REGEX:STRING=" - See CMake documentation for regex-specification. - - - Added defaults to CMake for long double conversion checks - - HDF5 performs a couple of checks at build time to see if long double - values can be converted correctly (IBM's Power architecture uses a - special format for long doubles). These checks were performed using - TRY_RUN, which is a problem when cross-compiling. - - These checks now use default values appropriate for most non-Power - systems when cross-compiling. The cache values can be pre-set if - necessary, which will preempt both the TRY_RUN and the default. - - Affected values: - H5_LDOUBLE_TO_LONG_SPECIAL (default no) - H5_LONG_TO_LDOUBLE_SPECIAL (default no) - H5_LDOUBLE_TO_LLONG_ACCURATE (default yes) - H5_LLONG_TO_LDOUBLE_CORRECT (default yes) - H5_DISABLE_SOME_LDOUBLE_CONV (default no) - - Fixes GitHub #3585 - - - Improved support for Intel oneAPI - - * Separates the old 'classic' Intel compiler settings and warnings - from the oneAPI settings - * Uses `-check nouninit` in debug builds to avoid false positives - when building H5_buildiface with `-check all` - * Both Autotools and CMake - - - Added new options for CMake and Autotools to control the Doxygen - warnings as errors setting. - - * HDF5_ENABLE_DOXY_WARNINGS: ON/OFF (Default: ON) - * --enable-doxygen-errors: enable/disable (Default: enable) - - The default will fail compile if the doxygen parsing generates warnings. - The option can be disabled if certain versions of doxygen have parsing - issues. i.e. 1.9.5, 1.9.8. - - Addresses GitHub issue #3398 - - - Added support for AOCC and classic Flang w/ the Autotools - - * Adds a config/clang-fflags options file to support Flang - * Corrects missing "-Wl," from linker options in the libtool wrappers - when using Flang, the MPI Fortran compiler wrappers, and building - the shared library. This would often result in unrecognized options - like -soname. - * Enable -nomp w/ Flang to avoid linking to the OpenMPI library. - - CMake can build the parallel, shared library w/ Fortran using AOCC - and Flang, so no changes were needed for that build system. - - Fixes GitHub issues #3439, #1588, #366, #280 - - - Converted the build of libaec and zlib to use FETCH_CONTENT with CMake. - - Using the CMake FetchContent module, the external filters can populate - content at configure time via any method supported by the ExternalProject - module. Whereas ExternalProject_Add() downloads at build time, the - FetchContent module makes content available immediately, allowing the - configure step to use the content in commands like add_subdirectory(), - include() or file() operations. - - Removed HDF options for using FETCH_CONTENT explicitly: - BUILD_SZIP_WITH_FETCHCONTENT:BOOL - BUILD_ZLIB_WITH_FETCHCONTENT:BOOL - - - Thread-safety + static library disabled on Windows w/ CMake - - The thread-safety feature requires hooks in DllMain(), which is only - present in the shared library. 
- - We previously just warned about this, but now any CMake configuration - that tries to build thread-safety and the static library will fail. - This cannot be overridden with ALLOW_UNSUPPORTED. - - Fixes GitHub issue #3613 - - - Autotools builds now build the szip filter by default when an appropriate - library is found - - Since libaec is prevalent and BSD-licensed for both encoding and - decoding, we build the szip filter by default now. - - Both autotools and CMake build systems will process the szip filter the same as - the zlib filter is processed. - - - Removed CMake cross-compiling variables - - * HDF5_USE_PREGEN - * HDF5_BATCH_H5DETECT - - These were used to work around H5detect and H5make_libsettings and - are no longer required. - - - Running H5make_libsettings is no longer required for cross-compiling - - The functionality of H5make_libsettings is now handled via template files, - so H5make_libsettings has been removed. - - - Running H5detect is no longer required for cross-compiling - - The functionality of H5detect is now exercised at library startup, - so H5detect has been removed. - - - Updated HDF5 API tests CMake code to support VOL connectors - - * Implemented support for fetching, building and testing HDF5 - VOL connectors during the library build process and documented - the feature under doc/cmake-vols-fetchcontent.md - - * Implemented the HDF5_TEST_API_INSTALL option that enables - installation of the HDF5 API tests on the system - - - Added new CMake options for building and running HDF5 API tests - (Experimental) - - HDF5 API tests are an experimental feature, primarily targeted - toward HDF5 VOL connector authors, that is currently being developed. - These tests exercise the HDF5 API and are being integrated back - into the HDF5 library from the HDF5 VOL tests repository - (https://github.com/HDFGroup/vol-tests). To support this feature, - the following new options have been added to CMake: - - * HDF5_TEST_API: ON/OFF (Default: OFF) - - Controls whether the HDF5 API tests will be built. These tests - will only be run during testing of HDF5 if the HDF5_TEST_SERIAL - (for serial tests) and HDF5_TEST_PARALLEL (for parallel tests) - options are enabled. - - * HDF5_TEST_API_INSTALL: ON/OFF (Default: OFF) - - Controls whether the HDF5 API test executables will be installed - on the system alongside the HDF5 library. This option is currently - not functional. - - * HDF5_TEST_API_ENABLE_ASYNC: ON/OFF (Default: OFF) - - Controls whether the HDF5 Async API tests will be built. These - tests will only be run if the VOL connector used supports Async - operations. - - * HDF5_TEST_API_ENABLE_DRIVER: ON/OFF (Default: OFF) - - Controls whether to build the HDF5 API test driver program. This - test driver program is useful for VOL connectors that use a - client/server model where the server needs to be up and running - before the VOL connector can function. This option is currently - not functional. - - * HDF5_TEST_API_SERVER: String (Default: "") - - Used to specify a path to the server executable that the test - driver program should execute. - - - Added support for CMake presets file. - - CMake supports two main files, CMakePresets.json and CMakeUserPresets.json, - that allow users to specify common configure options and share them with others. - HDF added a CMakePresets.json file of a typical configuration and support - file, config/cmake-presets/hidden-presets.json. - Also added a section to INSTALL_CMake.txt with very basic explanation of the - process to use CMakePresets. 
- - - Deprecated and removed old SZIP library in favor of LIBAEC library - - LIBAEC library has been used in HDF5 binaries as the szip library of choice - for a few years. We are removing the options for using the old SZIP library. - - Also removed the config/cmake/FindSZIP.cmake file. - - - Enabled instrumentation of the library by default in CMake for parallel - debug builds - - HDF5 can be configured to instrument portions of the parallel library to - aid in debugging. Autotools builds of HDF5 turn this capability on by - default for parallel debug builds and off by default for other build types. - CMake has been updated to match this behavior. - - - Added new option to build libaec and zlib inline with CMake. - - Using the CMake FetchContent module, the external filters can populate - content at configure time via any method supported by the ExternalProject - module. Whereas ExternalProject_Add() downloads at build time, the - FetchContent module makes content available immediately, allowing the - configure step to use the content in commands like add_subdirectory(), - include() or file() operations. - - The HDF options (and defaults) for using this are: - BUILD_SZIP_WITH_FETCHCONTENT:BOOL=OFF - LIBAEC_USE_LOCALCONTENT:BOOL=OFF - BUILD_ZLIB_WITH_FETCHCONTENT:BOOL=OFF - ZLIB_USE_LOCALCONTENT:BOOL=OFF - - The CMake variables to control the path and file names: - LIBAEC_TGZ_ORIGPATH:STRING - LIBAEC_TGZ_ORIGNAME:STRING - ZLIB_TGZ_ORIGPATH:STRING - ZLIB_TGZ_ORIGNAME:STRING - - See the CMakeFilters.cmake and config/cmake/cacheinit.cmake files for usage. - - - Added the CMake variable HDF5_ENABLE_ROS3_VFD to the HDF5 CMake config - file hdf5-config.cmake. This allows to easily detect if the library - has been built with or without read-only S3 functionality. Library: -------- - - Added new routines for interacting with error stacks: H5Epause_stack, - H5Eresume_stack, and H5Eis_paused. These routines can be used to - indicate that errors from a call to an HDF5 routine should not be - pushed on to an error stack. Primarily targeted toward 3rd-party - developers of Virtual File Drivirs (VFDs) and Virtual Object Layer (VOL) - connectors, these routines allow developers to perform "speculative" - operations (such as trying to open a file or object) without requiring - that the error stack be cleared after a speculative operation fails. - - H5Pset_external() now uses HDoff_t, which is always a 64-bit type The H5Pset_external() call took an off_t parameter in HDF5 1.14.x and @@ -474,302 +78,16 @@ New Features Fixes GitHub issue #3506 - - Relaxed behavior of H5Pset_page_buffer_size() when opening files - - This API call sets the size of a file's page buffer cache. This call - was extremely strict about matching its parameters to the file strategy - and page size used to create the file, requiring a separate open of the - file to obtain these parameters. - - These requirements have been relaxed when using the fapl to open - a previously-created file: - - * When opening a file that does not use the H5F_FSPACE_STRATEGY_PAGE - strategy, the setting is ignored and the file will be opened, but - without a page buffer cache. This was previously an error. - - * When opening a file that has a page size larger than the desired - page buffer cache size, the page buffer cache size will be increased - to the file's page size. This was previously an error. - - The behavior when creating a file using H5Pset_page_buffer_size() is - unchanged. 
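A minimal sketch of the new error-stack pausing routines described above (H5Epause_stack/H5Eresume_stack), assuming they take an error-stack ID with H5E_DEFAULT selecting the library's default stack; the file name is illustrative:

```c
#include "hdf5.h"
#include <stdio.h>

int main(void)
{
    hid_t file_id = H5I_INVALID_HID;

    /* "Speculative" open: do not push errors onto the default stack
       if the file is missing or not an HDF5 file */
    H5Epause_stack(H5E_DEFAULT);
    file_id = H5Fopen("maybe_missing.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    H5Eresume_stack(H5E_DEFAULT);

    if (file_id < 0)
        printf("open failed, but no error was pushed to the stack\n");
    else
        H5Fclose(file_id);

    return 0;
}
```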
- - Fixes GitHub issue #3382 - - - Added support for _Float16 16-bit half-precision floating-point datatype - - Support for the _Float16 C datatype has been added on platforms where: - - - The _Float16 datatype and its associated macros (FLT16_MIN, FLT16_MAX, - FLT16_EPSILON, etc.) are available - - A simple test program that converts between the _Float16 datatype and - other datatypes with casts can be successfully compiled and run at - configure time. Some compilers appear to be buggy or feature-incomplete - in this regard and will generate calls to compiler-internal functions - for converting between the _Float16 datatype and other datatypes, but - will not link these functions into the build, resulting in build - failures. - - The following new macros have been added: - - H5_HAVE__FLOAT16 - This macro is defined in H5pubconf.h and will have - the value 1 if support for the _Float16 datatype is - available. It will not be defined otherwise. - - H5_SIZEOF__FLOAT16 - This macro is defined in H5pubconf.h and will have - a value corresponding to the size of the _Float16 - datatype, as computed by sizeof(). It will have the - value 0 if support for the _Float16 datatype is not - available. - - H5_HAVE_FABSF16 - This macro is defined in H5pubconf.h and will have the - value 1 if the fabsf16 function is available for use. - - H5_LDOUBLE_TO_FLOAT16_CORRECT - This macro is defined in H5pubconf.h and - will have the value 1 if the platform can - correctly convert long double values to - _Float16. Some compilers have issues with - this. - - H5T_NATIVE_FLOAT16 - This macro maps to the ID of an HDF5 datatype representing - the native C _Float16 datatype for the platform. If - support for the _Float16 datatype is not available, the - macro will map to H5I_INVALID_HID and should not be used. - - H5T_IEEE_F16BE - This macro maps to the ID of an HDF5 datatype representing - a big-endian IEEE 754 16-bit floating-point datatype. This - datatype is available regardless of whether _Float16 support - is available or not. - - H5T_IEEE_F16LE - This macro maps to the ID of an HDF5 datatype representing - a little-endian IEEE 754 16-bit floating-point datatype. - This datatype is available regardless of whether _Float16 - support is available or not. - - The following new hard datatype conversion paths have been added, but - will only be used when _Float16 support is available: - - H5T_NATIVE_SCHAR <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_UCHAR <-> H5T_NATIVE_FLOAT16 - H5T_NATIVE_SHORT <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_USHORT <-> H5T_NATIVE_FLOAT16 - H5T_NATIVE_INT <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_UINT <-> H5T_NATIVE_FLOAT16 - H5T_NATIVE_LONG <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_ULONG <-> H5T_NATIVE_FLOAT16 - H5T_NATIVE_LLONG <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_ULLONG <-> H5T_NATIVE_FLOAT16 - H5T_NATIVE_FLOAT <-> H5T_NATIVE_FLOAT16 | H5T_NATIVE_DOUBLE <-> H5T_NATIVE_FLOAT16 - H5T_NATIVE_LDOUBLE <-> H5T_NATIVE_FLOAT16 - - The H5T_NATIVE_LDOUBLE -> H5T_NATIVE_FLOAT16 hard conversion path will only - be available and used if H5_LDOUBLE_TO_FLOAT16_CORRECT has a value of 1. Otherwise, - the conversion will be emulated in software by the library. - - Note that in the absence of any compiler flags for architecture-specific - tuning, the generated code for datatype conversions with the _Float16 type - may perform conversions by first promoting the type to float. 
Use of - architecture-specific tuning compiler flags may instead allow for the - generation of specialized instructions, such as AVX512-FP16 instructions, - if available. - - - Made several improvements to the datatype conversion code - - * The datatype conversion code was refactored to use pointers to - H5T_t datatype structures internally rather than IDs wrapping - the pointers to those structures. These IDs are needed if an - application-registered conversion function or conversion exception - function are involved during the conversion process. For simplicity, - the conversion code simply passed these IDs down and let the internal - code unwrap the IDs as necessary when needing to access the wrapped - H5T_t structures. However, this could cause a significant amount of - repeated ID lookups for compound datatypes and other container-like - datatypes. The code now passes down pointers to the datatype - structures and only creates IDs to wrap those pointers as necessary. - Quick testing showed an average ~3x to ~10x improvement in performance - of conversions on container-like datatypes, depending on the - complexity of the datatype. - - * A conversion "context" structure was added to hold information about - the current conversion being performed. This allows conversions on - container-like datatypes to be optimized better by skipping certain - portions of the conversion process that remain relatively constant - when multiple elements of the container-like datatype are being - converted. - - * After refactoring the datatype conversion code to use pointers - internally rather than IDs, several copies of datatypes that were - made by higher levels of the library were able to be removed. The - internal IDs that were previously registered to wrap those copied - datatypes were also able to be removed. - - - Implemented optimized support for vector I/O in the Subfiling VFD - - Previously, the Subfiling VFD would handle vector I/O requests by - breaking them down into individual I/O requests, one for each entry - in the I/O vectors provided. This could result in poor I/O performance - for features in HDF5 that utilize vector I/O, such as parallel I/O - to filtered datasets. The Subfiling VFD now properly handles vector - I/O requests in their entirety, resulting in fewer I/O calls, improved - vector I/O performance and improved vector I/O memory efficiency. - - - Added a simple cache to the read-only S3 (ros3) VFD - - The read-only S3 VFD now caches the first N bytes of a file stored - in S3 to avoid a lot of small I/O operations when opening files. - This cache is per-file and created when the file is opened. - - N is currently 16 MiB or the size of the file, whichever is smaller. - - Addresses GitHub issue #3381 - - - Added new API function H5Pget_actual_selection_io_mode() - - This function allows the user to determine if the library performed - selection I/O, vector I/O, or scalar (legacy) I/O during the last HDF5 - operation performed with the provided DXPL. - - - Added support for in-place type conversion in most cases - - In-place type conversion allows the library to perform type conversion - without an intermediate type conversion buffer. This can improve - performance by allowing I/O in a single operation over the entire - selection instead of being limited by the size of the intermediate buffer. - Implemented for I/O on contiguous and chunked datasets when the selection - is contiguous in memory and when the memory datatype is not smaller than - the file datatype. 
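A minimal sketch of the _Float16 support described above, guarded by the H5_HAVE__FLOAT16 macro; the file and dataset names are illustrative:

```c
#include "hdf5.h"

int main(void)
{
#ifdef H5_HAVE__FLOAT16
    _Float16 data[4] = {(_Float16)1.0f, (_Float16)2.0f,
                        (_Float16)3.0f, (_Float16)4.0f};
    hsize_t  dims[1] = {4};

    hid_t file  = H5Fcreate("half_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(1, dims, NULL);

    /* Store as little-endian IEEE 754 half precision; the library converts
       from the native _Float16 memory type on write */
    hid_t dset = H5Dcreate2(file, "half_data", H5T_IEEE_F16LE, space,
                            H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dwrite(dset, H5T_NATIVE_FLOAT16, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

    H5Dclose(dset);
    H5Sclose(space);
    H5Fclose(file);
#endif
    return 0;
}
```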
- - - Changed selection I/O to be on by default when using the MPIO file driver - - - Added support for selection I/O in the MPIO file driver - - Previously, only vector I/O operations were supported. Support for - selection I/O should improve performance and reduce memory uses in some - cases. - - - Changed the error handling for a not found path in the find plugin process. - - While attempting to load a plugin the HDF5 library will fail if one of the - directories in the plugin paths does not exist, even if there are more paths - to check. Instead of exiting the function with an error, just logged the error - and continue processing the list of paths to check. - - - Implemented support for temporary security credentials for the Read-Only - S3 (ROS3) file driver. - - When using temporary security credentials, one also needs to specify a - session/security token next to the access key id and secret access key. - This token can be specified by the new API function H5Pset_fapl_ros3_token(). - The API function H5Pget_fapl_ros3_token() can be used to retrieve - the currently set token. - - - Added a Subfiling VFD configuration file prefix environment variable - - The Subfiling VFD now checks for values set in a new environment - variable "H5FD_SUBFILING_CONFIG_FILE_PREFIX" to determine if the - application has specified a pathname prefix to apply to the file - path for its configuration file. For example, this can be useful - for cases where the application wishes to write subfiles to a - machine's node-local storage while placing the subfiling configuration - file on a file system readable by all machine nodes. - - - Added H5Pset_selection_io(), H5Pget_selection_io(), and - H5Pget_no_selection_io_cause() API functions to manage the selection I/O - feature. This can be used to enable collective I/O with type conversion, - or it can be used with custom VFDs that support vector or selection I/O. - - - Added H5Pset_modify_write_buf() and H5Pget_modify_write_buf() API - functions to allow the library to modify the contents of write buffers, in - order to avoid malloc/memcpy. Currently only used for type conversion - with selection I/O. - Parallel Library: ----------------- - - Added optimized support for the parallel compression feature when - using the multi-dataset I/O API routines collectively - - Previously, calling H5Dwrite_multi/H5Dread_multi collectively in parallel - with a list containing one or more filtered datasets would cause HDF5 to - break out of the optimized multi-dataset I/O mode and instead perform I/O - by looping over each dataset in the I/O request. The library has now been - updated to perform I/O in a more optimized manner in this case by first - performing I/O on all the filtered datasets at once and then performing - I/O on all the unfiltered datasets at once. - - - Changed H5Pset_evict_on_close so that it can be called with a parallel - build of HDF5 - - Previously, H5Pset_evict_on_close would always fail when called from a - parallel build of HDF5, stating that the feature is not supported with - parallel HDF5. This failure would occur even if a parallel build of HDF5 - was used with a serial HDF5 application. H5Pset_evict_on_close can now - be called regardless of the library build type and the library will - instead fail during H5Fcreate/H5Fopen if the "evict on close" property - has been set to true and the file is being opened for parallel access - with more than 1 MPI process. 
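A minimal sketch of driving the selection I/O feature from a transfer property list, per the H5Pset_selection_io(), H5Pget_actual_selection_io_mode(), and H5Pset_modify_write_buf() items above; the H5D_SELECTION_IO/H5D_VECTOR_IO/H5D_SCALAR_IO flag names are assumed here and the actual I/O call is elided:

```c
#include "hdf5.h"
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    hid_t    dxpl   = H5Pcreate(H5P_DATASET_XFER);
    uint32_t actual = 0;

    /* Ask for selection I/O and allow the library to modify the write
       buffer in place (avoids an internal conversion buffer) */
    H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON);
    H5Pset_modify_write_buf(dxpl, 1);

    /* ... pass 'dxpl' to H5Dwrite()/H5Dread() here ... */

    /* Afterwards, report what the library actually performed */
    H5Pget_actual_selection_io_mode(dxpl, &actual);
    if (actual & H5D_SELECTION_IO)
        printf("selection I/O was performed\n");
    else if (actual & H5D_VECTOR_IO)
        printf("vector I/O was performed\n");
    else if (actual & H5D_SCALAR_IO)
        printf("scalar (legacy) I/O was performed\n");

    H5Pclose(dxpl);
    return 0;
}
```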
+ - Fortran Library: ---------------- + - - - Add Fortran H5R APIs: - h5rcreate_attr_f, h5rcreate_object_f, h5rcreate_region_f, - h5ropen_attr_f, h5ropen_object_f, h5ropen_region_f, - h5rget_file_name_f, h5rget_attr_name_f, h5rget_obj_name_f, - h5rcopy_f, h5requal_f, h5rdestroy_f, h5rget_type_f - - - Added Fortran H5E APIs: - h5eregister_class_f, h5eunregister_class_f, h5ecreate_msg_f, h5eclose_msg_f - h5eget_msg_f, h5epush_f, h5eget_num_f, h5ewalk_f, h5eget_class_name_f, - h5eappend_stack_f, h5eget_current_stack_f, h5eset_current_stack_f, h5ecreate_stack_f, - h5eclose_stack_f, h5epop_f, h5eprint_f (C h5eprint v2 signature) - - - Added API support for Fortran MPI_F08 module definitions: - Adds support for MPI's MPI_F08 module datatypes: type(MPI_COMM) and type(MPI_INFO) for HDF5 APIs: - H5PSET_FAPL_MPIO_F, H5PGET_FAPL_MPIO_F, H5PSET_MPI_PARAMS_F, H5PGET_MPI_PARAMS_F - Ref. #3951 - - - Added Fortran APIs: - H5FGET_INTENT_F, H5SSEL_ITER_CREATE_F, H5SSEL_ITER_GET_SEQ_LIST_F, - H5SSEL_ITER_CLOSE_F, H5S_mp_H5SSEL_ITER_RESET_F - - - Added Fortran Parameters: - H5S_SEL_ITER_GET_SEQ_LIST_SORTED_F, H5S_SEL_ITER_SHARE_WITH_DATASPACE_F - - - Added Fortran Parameters: - H5S_BLOCK_F and H5S_PLIST_F - - - The configuration definitions file, H5config_f.inc, is now installed - and the HDF5 version number has been added to it. - - - Added Fortran APIs: - h5fdelete_f - - - Added Fortran APIs: - h5vlnative_addr_to_token_f and h5vlnative_token_to_address_f - - - Fixed an uninitialized error return value for hdferr - to return the error state of the h5aopen_by_idx_f API. - - - Added h5pget_vol_cap_flags_f and related Fortran VOL - capability definitions. - - - Fortran async APIs H5A, H5D, H5ES, H5G, H5F, H5L and H5O were added. - - - Added Fortran APIs: - h5pset_selection_io_f, h5pget_selection_io_f, - h5pget_actual_selection_io_mode_f, - h5pset_modify_write_buf_f, h5pget_modify_write_buf_f - - - Added Fortran APIs: - h5get_free_list_sizes_f, h5dwrite_chunk_f, h5dread_chunk_f, - h5fget_info_f, h5lvisit_f, h5lvisit_by_name_f, - h5pget_no_selection_io_cause_f, h5pget_mpio_no_collective_cause_f, - h5sselect_shape_same_f, h5sselect_intersect_block_f, - h5pget_file_space_page_size_f, h5pset_file_space_page_size_f, - h5pget_file_space_strategy_f, h5pset_file_space_strategy_f - - - Removed "-commons" linking option on Darwin, as COMMON and EQUIVALENCE - are no longer used in the Fortran source. - - Fixes GitHub issue #3571 C++ Library: ------------ @@ -783,27 +101,12 @@ New Features Tools: ------ - - Add doxygen files for the tools - - Implement the tools usage text as pages in doxygen. - - - Add option to adjust the page buffer size in tools - - The page buffer cache size for a file can now be adjusted using the - --page-buffer-size=N - option in the h5repack, h5diff, h5dump, h5ls, and h5stat tools. This - will call the H5Pset_page_buffer_size() API function with the specified - size in bytes. - - - Allow h5repack to reserve space for a user block without a file - - This is useful for users who want to reserve space - in the file for future use without requiring a file to copy. 
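The --page-buffer-size=N tool option described above corresponds to calling H5Pset_page_buffer_size() on a file access property list before opening the file. A minimal sketch (file name and size are illustrative), which also relies on the relaxed open behavior noted earlier:

```c
#include "hdf5.h"

int main(void)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    /* 1 MiB page buffer; 0/0 means no minimum percentage is reserved
       for metadata or raw data pages */
    H5Pset_page_buffer_size(fapl, 1024 * 1024, 0, 0);

    /* With the relaxed behavior described earlier, this open also succeeds
       for files that were not created with the PAGE file-space strategy;
       the page buffer is simply not used in that case */
    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, fapl);
    if (file >= 0)
        H5Fclose(file);

    H5Pclose(fapl);
    return 0;
}
```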
+ - High-Level APIs: ---------------- - - Added Fortran HL API: h5doappend_f + - C Packet Table API: @@ -825,981 +128,16 @@ Support for new platforms, languages and compilers ================================================== - -Bug Fixes since HDF5-1.14.0 release +Bug Fixes since HDF5-1.16.0 release =================================== Library ------- - - Fixed a bug with large external datasets - - When performing a large I/O on an external dataset, the library would only - issue a single read or write system call. This could cause errors or cause - the data to be incorrect. These calls do not guarantee that they will - process the entire I/O request, and may need to be called multiple times - to complete the I/O, advancing the buffer and reducing the size by the - amount actually processed by read or write each time. Implemented this - algorithm for external datasets in both the read and write cases. - - Fixes GitHub #4216 - Fixes h5py GitHub #2394 - - - Fixed a bug in the Subfiling VFD that could cause a buffer over-read - and memory allocation failures - - When performing vector I/O with the Subfiling VFD, making use of the - vector I/O size extension functionality could cause the VFD to read - past the end of the "I/O sizes" array that is passed in. When an entry - in the "I/O sizes" array has the value 0 and that entry is at an array - index greater than 0, this signifies that the value in the preceding - array entry should be used for the rest of the I/O vectors, effectively - extending the last valid I/O size across the remaining entries. This - allows an application to save a bit on memory by passing in a smaller - "I/O sizes" array. The Subfiling VFD didn't implement a check for this - functionality in the portion of the code that generates I/O vectors, - causing it to read past the end of the "I/O sizes" array when it was - shorter than expected. This could also result in memory allocation - failures, as the nearby memory allocations are based off the values - read from that array, which could be uninitialized. - - - Fixed H5Rget_attr_name to return the length of the attribute's name - without the null terminator - - H5Rget_file_name and H5Rget_obj_name both return the name's length - without the null terminator. H5Rget_attr_name now behaves consistently - with the other two APIs. Going forward, all the get character string - APIs in HDF5 will be modified/written in this manner, regarding the - length of a character string. - - - Fixed library to allow usage of page buffering feature for serial file - access with parallel builds of HDF5 - - When HDF5 is built with parallel support enabled, the library would previously - disallow any usage of page buffering, even if a file was not opened with - parallel access. The library now allows usage of page buffering for serial - file access with parallel builds of HDF5. Usage of page buffering is still - disabled for any form of parallel file access, even if only 1 MPI process - is used. - - - Fixed a leak of datatype IDs created internally during datatype conversion - - Fixed an issue where the library could leak IDs that it creates internally - for compound datatype members during datatype conversion. When the library's - table of datatype conversion functions is modified (such as when a new - conversion function is registered with the library from within an application), - the compound datatype conversion function has to recalculate data that it - has cached. 
When recalculating that data, the library was registering new - IDs for each of the members of the source and destination compound datatypes - involved in the conversion process and was overwriting the old cached IDs - without first closing them. This would result in use-after-free issues due - to multiple IDs pointing to the same internal H5T_t structure, as well as - crashes due to the library not gracefully handling partially initialized or - partially freed datatypes on library termination. - - Fixes h5py GitHub #2419 - - - Fixed function H5Requal actually to compare the reference pointers - - Fixed an issue with H5Requal always returning true because the - function was only comparing the ref2_ptr to itself. - - - Fixed infinite loop closing library issue when h5dump with a user provided test file - - The library's metadata cache calls the "get_final_load_size" client callback - to find out the actual size of the object header. As the size obtained - exceeds the file's EOA, it throws an error but the object header structure - allocated through the client callback is not freed hence causing the - issue described. - - (1) Free the structure allocated in the object header client callback after - saving the needed information in udata. (2) Deserialize the object header - prefix in the object header's "deserialize" callback regardless. - - Fixes GitHub #3790 - - - Fixed many (future) CVE issues - - A partner organization corrected many potential security issues, which - were fixed and reported to us before submission to MITRE. These do - not have formal CVE issues assigned to them yet, so the numbers assigned - here are just placeholders. We will update the HDF5 1.14 CVE list (link - below) when official MITRE CVE tracking numbers are assigned. - - These CVE issues are generally of the same form as other reported HDF5 - CVE issues, and rely on the library failing while attempting to read - a malformed file. Most of them cause the library to segfault and will - probably be assigned "medium (~5/10)" scores by NIST, like the other - HDF5 CVE issues. - - The issues that were reported to us have all been fixed in this release, - so HDF5 will continue to have no unfixed public CVE issues. - - NOTE: HDF5 versions earlier than 1.14.4 should be considered vulnerable - to these issues and users should upgrade to 1.14.4 as soon as - possible. Note that it's possible to build the 1.14 library with - HDF5 1.8, 1.10, etc. API bindings for people who wish to enjoy - the benefits of a more secure library but don't want to upgrade - to the latest API. We will not be bringing the CVE fixes to earlier - versions of the library (they are no longer supported). 
- - LIST OF CVE ISSUES FIXED IN THIS RELEASE: - - * CVE-2024-0116-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5D__scatter_mem resulting in causing denial of service or potential - code execution - - * CVE-2024-0112-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5S__point_deserialize resulting in the corruption of the - instruction pointer and causing denial of service or potential code - execution - - * CVE-2024-0111-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5T__conv_struct_opt resulting in causing denial of service or - potential code execution - - * CVE-2023-1208-002 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5O__mtime_new_encode resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1208-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5O__layout_encode resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1207-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5O__dtype_encode_helper causing denial of service or potential - code execution - - * CVE-2023-1205-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5VM_array_fill resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1202-002 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5T__get_native_type resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1202-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5T__ref_mem_setnull resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1130-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5T_copy_reopen resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1125-001 - HDF5 versions <= 1.14.3 contain a heap buffer overflow in - H5Z__nbit_decompress_one_byte caused by the earlier use of an - initialized pointer. 
This may result in Denial of Service or - potential code execution - - * CVE-2023-1114-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5HG_read resulting in the corruption of the instruction pointer - and causing denial of service or potential code execution - - * CVE-2023-1113-002 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5F_addr_decode_len resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1113-001 - HDF5 versions <= 1.14.3 contain a heap buffer overflow caused by - the unsafe use of strdup in H5MM_xstrdup, resulting in denial of - service or potential code execution - - * CVE-2023-1108-001 - HDF5 versions <= 1.14.3 contain a out-of-bounds read operation in - H5FL_arr_malloc resulting in denial of service or potential code - execution - - * CVE-2023-1104-004 - HDF5 versions <= 1.14.3 contain a out-of-bounds read operation in - H5T_close_real resulting in denial of service or potential code - execution - - * CVE-2023-1104-003 - HDF5 library versions <=1.14.3 contain a heap buffer overflow flaw - in the function H5HL__fl_deserialize resulting in denial of service - or potential code execution - - * CVE-2023-1104-002 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5HL__fl_deserialize resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1104-001 - HDF5 library versions <=1.14.3 contains a stack overflow in the - function H5E_printf_stack resulting in denial of service or - potential code execution - - * CVE-2023-1023-001 - HDF5 library versions <=1.14.3 heap buffer overflow in - H5VM_memcpyvv which may result in denial of service or code - execution - - * CVE-2023-1019-001 - HDF5 library versions <=1.14.3 contain a stack buffer overflow in - H5VM_memcpyvv resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1018-001 - HDF5 library versions <=1.14.3 contain a memory corruption in - H5A__close resulting in the corruption of the instruction pointer - and causing denial of service or potential code execution - - * CVE-2023-1017-002 - HDF5 library versions <=1.14.3 may use an uninitialized value - H5A__attr_release_table resulting in denial of service - - * CVE-2023-1017-001 - HDF5 library versions <=1.14.3 may attempt to dereference - uninitialized values in h5tools_str_sprint, which will lead to - denial of service - - * CVE-2023-1013-004 - HDF5 versions <= 1.13.3 contain a stack buffer overflow in - H5HG_read resulting in denial of service or potential code - execution - - * CVE-2023-1013-003 - HDF5 library versions <=1.14.3 contain a buffer overrun in - H5Z__filter_fletcher32 resulting in the corruption of the - instruction pointer and causing denial of service or potential - code execution - - * CVE-2023-1013-002 - HDF5 library versions <=1.14.3 contain a buffer overrun in - H5O__linfo_decode resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1013-001 - HDF5 library versions <=1.14.3 contain a buffer overrun in - H5Z__filter_scaleoffset resulting in the corruption of the - instruction pointer and causing denial of service or potential - code execution - - * CVE-2023-1012-001 - HDF5 library versions <=1.14.3 contain a stack buffer overflow in - H5R__decode_heap resulting in the corruption of the instruction - 
pointer and causing denial of service or potential code execution - - * CVE-2023-1010-001 - HDF5 library versions <=1.14.3 contain a stack buffer overflow in - H5FL_arr_malloc resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1009-001 - HDF5 library versions <=1.14.3 contain a stack buffer overflow in - H5FL_arr_malloc resulting in the corruption of the instruction - pointer and causing denial of service or potential code execution - - * CVE-2023-1006-004 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5A__attr_release_table resulting in the corruption of the - instruction pointer and causing denial of service or potential code - execution - - * CVE-2023-1006-003 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5T__bit_find resulting in the corruption of the instruction pointer - and causing denial of service or potential code execution. - - * CVE-2023-1006-002 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5HG_read resulting in the corruption of the instruction pointer - and causing denial of service or potential code execution - - * CVE-2023-1006-001 - HDF5 library versions <=1.14.3 contain a heap buffer overflow in - H5HG__cache_heap_deserialize resulting in the corruption of the - instruction pointer and causing denial of service or potential code - execution - - FULL OFFICIAL HDF5 CVE list (from mitre.org): - - https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=HDF5 - - 1.14.x CVE tracking list: - - https://github.com/HDFGroup/hdf5/blob/hdf5_1_14/CVE_list_1_14.md - - HDF5 CVE regression test suite (includes proof-of-concept files): - - https://github.com/HDFGroup/cve_hdf5 - - - Fixed a divide-by-zero issue when a corrupt file sets the page size to 0 - - If a corrupt file sets the page buffer size in the superblock to zero, - the library could attempt to divide by zero when allocating space in - the file. The library now checks for valid page buffer sizes when - reading the superblock message. - - Fixes oss-fuzz issue 58762 - - - Fixed a bug when using array datatypes with certain parent types - - Array datatype conversion would never use a background buffer, even if the - array's parent type (what the array is an array of) required a background - buffer for conversion. This resulted in crashes in some cases when using - an array of compound, variable length, or reference datatypes. Array types - now use a background buffer if needed by the parent type. - - - Fixed potential buffer read overflows in H5PB_read - - H5PB_read previously did not account for the fact that the size of the - read it's performing could overflow the page buffer pointer, depending - on the calculated offset for the read. This has been fixed by adjusting - the size of the read if it's determined that it would overflow the page. - - - Fixed CVE-2017-17507 - - This CVE was previously declared fixed, but later testing with a static - build of HDF5 showed that it was not fixed. - - When parsing a malformed (fuzzed) compound type containing variable-length - string members, the library could produce a segmentation fault, crashing - the library. - - This was fixed after GitHub PR #4234 - - Fixes GitHub issue #3446 - - - Fixed a cache assert with very large metadata objects - - If the library tries to load a metadata object that is above a - certain size, this would trip an assert in debug builds. 
This could - happen if you create a very large number of links in an old-style - group that uses local heaps. - - There is no need for this assert. The library's metadata cache - can handle large objects. The assert has been removed. - - Fixes GitHub #3762 - - - Fixed an issue with the Subfiling VFD and multiple opens of a - file - - An issue with the way the Subfiling VFD handles multiple opens - of the same file caused the file structures for the extra opens - to occasionally get mapped to an incorrect subfiling context - object. The VFD now correctly maps the file structures for - additional opens of an already open file to the same context - object. - - - Fixed a bug that causes the library to incorrectly identify - the endian-ness of 16-bit and smaller C floating-point datatypes - - When detecting the endian-ness of an in-memory C floating-point - datatype, the library previously always assumed that the type - was at least 32 bits in size. This resulted in invalid memory - accesses and would usually cause the library to identify the - datatype as having an endian-ness of H5T_ORDER_VAX. This has - now been fixed. - - - Fixed a bug that causes an invalid memory access issue when - converting 16-bit floating-point values to integers with the - library's software conversion function - - The H5T__conv_f_i function previously always assumed that - floating-point values were at least 32 bits in size and would - access invalid memory when attempting to convert 16-bit - floating-point values to integers. To fix this, parts of the - H5T__conv_f_i function had to be rewritten, which also resulted - in a significant speedup when converting floating-point values - to integers where the library does not have a hard conversion - path. This is the case for any floating-point values with a - datatype not represented by H5T_NATIVE_FLOAT16 (if _Float16 is - supported), H5T_NATIVE_FLOAT, H5T_NATIVE_DOUBLE or - H5T_NATIVE_LDOUBLE. - - - Fixed a bug that can cause incorrect data when overflows occur - while converting integer values to floating-point values with - the library's software conversion function - - The H5T__conv_i_f function had a bug which previously caused it - to return incorrect data when an overflow occurs and an application's - conversion exception callback function decides not to handle the - overflow. Rather than return positive infinity, the library would - return truncated data. This has now been fixed. - - - Corrected H5Soffset_simple() when offset is NULL - - The reference manual states that the offset parameter of H5Soffset_simple() - can be set to NULL to reset the offset of a simple dataspace to 0. This - has never been true, and passing NULL was regarded as an error. - - The library will now accept NULL for the offset parameter and will - correctly set the offset to zero. - - Fixes HDFFV-9299 - - - Fixed an issue where the Subfiling VFD's context object cache could - grow too large - - The Subfiling VFD keeps a cache of its internal context objects to - speed up access to a context object for a particular file, as well - as access to that object across multiple opens of the same file. - However, opening a large amount of files with the Subfiling VFD over - the course of an application's lifetime could cause this cache to grow - too large and result in the application running out of available MPI - communicator objects. On file close, the Subfiling VFD now simply - evicts context objects out of its cache and frees them. 
It is assumed - that multiple opens of a file will be a less common use case for the - Subfiling VFD, but this can be revisited if it proves to be an issue - for performance. - - - Fixed error when overwriting certain nested variable length types - - Previously, when using a datatype that included a variable length type - within a compound or array within another variable length type, and - overwriting data with a shorter (top level) variable length sequence, an - error could occur. This has been fixed. - - - Take user block into account in H5Dchunk_iter() and H5Dget_chunk_info() - - The address reported by the following functions did not correctly - take the user block into account: - - * H5Dchunk_iter() <-- addr passed to callback - * H5Dget_chunk_info() <-- addr parameter - * H5Dget_chunk_info_by_coord() <-- addr parameter - - This means that these functions reported logical HDF5 file addresses, - which would only be equal to the physical addresses when there is no - user block prepended to the HDF5 file. This is unfortunate, as the - primary use of these functions is to get physical addresses in order - to directly access the chunks. - - The listed functions now correctly take the user block into account, - so they will emit physical addresses that can be used to directly - access the chunks. - - Fixes #3003 - - - Fixed asserts raised by large values of H5Pset_est_link_info() parameters - - If large values for est_num_entries and/or est_name_len were passed - to H5Pset_est_link_info(), the library would attempt to create an - object header NIL message to reserve enough space to hold the links in - compact form (i.e., concatenated), which could exceed allowable object - header message size limits and trip asserts in the library. - - This bug only occurred when using the HDF5 1.8 file format or later and - required the product of the two values to be ~64k more than the size - of any links written to the group, which would cause the library to - write out a too-large NIL spacer message to reserve the space for the - unwritten links. - - The library now inspects the phase change values to see if the dataset - is likely to be compact and checks the size to ensure any NIL spacer - messages won't be larger than the library allows. - - Fixes GitHub #1632 - - - Fixed a bug where H5Tset_fields does not account for any offset - set for a floating-point datatype when determining if values set - for spos, epos, esize, mpos and msize make sense for the datatype - - Previously, H5Tset_fields did not take datatype offsets into account - when determining if the values set make sense for the datatype. - This would cause the function to fail when the precision for a - datatype is correctly set such that the offset bits are not included. - This has now been fixed. - - - Fixed H5Fget_access_plist so that it returns the file locking - settings for a file - - When H5Fget_access_plist (and the internal H5F_get_access_plist) - is called on a file, the returned File Access Property List has - the library's default file locking settings rather than any - settings set for the file. This causes two problems: - - - Opening an HDF5 file through an external link using H5Gopen, - H5Dopen, etc. with H5P_DEFAULT for the Dataset/Group/etc. - Access Property List will cause the external file to be opened - with the library's default file locking settings rather than - inheriting them from the parent file. 
This can be surprising - when a file is opened with file locking disabled, but its - external files are opened with file locking enabled. - - - An application cannot make use of the H5Pset_elink_fapl - function to match file locking settings between an external - file and its parent file without knowing the correct setting - ahead of time, as calling H5Fget_access_plist on the parent - file will not return the correct settings. - - This has been fixed by copying a file's file locking settings - into the newly-created File Access Property List in H5F_get_access_plist. - - This fix partially addresses GitHub issue #4011 - - - Memory usage growth issue - - Starting with the HDF5 1.12.1 release, an issue (GitHub issue #1256) - was observed where running a simple program that has a loop of opening - a file, reading from an object with a variable-length datatype and - then closing the file would result in the process fairly quickly - running out of memory. Upon further investigation, it was determined - that this memory was being kept around in the library's datatype - conversion pathway cache that is used to speed up datatype conversions - which are repeatedly used within an HDF5 application's lifecycle. For - conversions involving variable-length or reference datatypes, each of - these cached pathway entries keeps a reference to its associated file - for later use. Since the file was being closed and reopened on each - loop iteration, and since the library compares for equality between - instances of opened files (rather than equality of the actual files) - when determining if it can reuse a cached conversion pathway, it was - determining that no cached conversion pathways could be reused and was - creating a new cache entry on each loop iteration during I/O. This - would lead to constant growth of that cache and the memory it consumed, - as well as constant growth of the memory consumed by each cached entry - for the reference to its associated file. - - To fix this issue, the library now removes any cached datatype - conversion path entries for variable-length or reference datatypes - associated with a particular file when that file is closed. - - Fixes GitHub #1256 - - - Suppressed floating-point exceptions in H5T init code - - The floating-point datatype initialization code in H5Tinit_float.c - could raise FE_INVALID exceptions while munging bits and performing - comparisons that might involve NaN. This was not a problem when the - initialization code was executed in H5detect at compile time (prior - to 1.14.3), but now that the code is executed at library startup - (1.14.3+), these exceptions can be caught by user code, as is the - default in the NAG Fortran compiler. - - Starting in 1.14.4, we now suppress floating-point exceptions while - initializing the floating-point types and clear FE_INVALID before - restoring the original environment. - - Fixes GitHub #3831 - - - Fixed a file handle leak in the core VFD - - When opening a file with the core VFD and a file image, if the file - already exists, the file check would leak the POSIX file handle. - - Fixes GitHub issue #635 - - - Fixed some issues with chunk index metadata not getting read - collectively when collective metadata reads are enabled - - When looking up dataset chunks during I/O, the parallel library - temporarily disables collective metadata reads since it's generally - unlikely that the application will read the same chunks from all - MPI ranks. 
Leaving collective metadata reads enabled during - chunk lookups can lead to hangs or other bad behavior depending - on the chunk indexing structure used for the dataset in question. - However, due to the way that dataset chunk index metadata was - previously loaded in a deferred manner, this could mean that - the metadata for the main chunk index structure or its - accompanying pieces of metadata (e.g., fixed array data blocks) - could end up being read independently if these chunk lookup - operations are the first chunk index-related operation that - occurs on a dataset. This behavior is generally observed when - opening a dataset for which the metadata isn't in the metadata - cache yet and then immediately performing I/O on that dataset. - This behavior is not generally observed when creating a dataset - and then performing I/O on it, as the relevant metadata will - usually be in the metadata cache as a side effect of creating - the chunk index structures during dataset creation. - - This issue has been fixed by adding callbacks to the different - chunk indexing structure classes that allow more explicit control - over when chunk index metadata gets loaded. When collective - metadata reads are enabled, the necessary index metadata will now - get loaded collectively by all MPI ranks at the start of dataset - I/O to ensure that the ranks don't unintentionally read this - metadata independently further on. These changes fix collective - loading of the main chunk index structure, as well as v2 B-tree - root nodes, extensible array index blocks and fixed array data - blocks. There are still pieces of metadata that cannot currently - be loaded collectively, however, such as extensible array data - blocks, data block pages and super blocks, as well as fixed array - data block pages. These pieces of metadata are not necessarily - read in by all MPI ranks since this depends on which chunks the - ranks have selected in the dataset. Therefore, reading of these - pieces of metadata remains an independent operation. - - - Fixed potential hangs in parallel library during collective I/O with - independent metadata writes - - When performing collective parallel writes to a dataset where metadata - writes are requested as (or left as the default setting of) independent, - hangs could potentially occur during metadata cache sync points. This - was due to incorrect management of the internal state tracking whether - an I/O operation should be collective or not, causing the library to - attempt collective writes of metadata when they were meant to be - independent writes. During the metadata cache sync points, if the number - of cache entries being flushed was a multiple of the number of MPI ranks - in the MPI communicator used to access the HDF5 file, an equal amount of - collective MPI I/O calls were made and the dataset write call would be - successful. However, when the number of cache entries being flushed was - NOT a multiple of the number of MPI ranks, the ranks with more entries - than others would get stuck in an MPI_File_set_view call, while other - ranks would get stuck in a post-write MPI_Barrier call. This issue has - been fixed by correctly switching to independent I/O temporarily when - writing metadata independently during collective dataset I/O. - - - Dropped support for MPI-2 - - The MPI-2 supporting artifacts have been removed due to the cessation - of MPI-2 maintenance and testing since version HDF5 1.12. 
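The collective metadata read fixes above apply when an application has requested collective metadata operations. A minimal sketch of enabling them on a parallel file access property list (assumes an MPI-enabled build of HDF5; the file name is illustrative):

```c
#include "hdf5.h"
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    /* Request collective metadata reads and writes */
    H5Pset_all_coll_metadata_ops(fapl, 1);
    H5Pset_coll_metadata_write(fapl, 1);

    hid_t file = H5Fopen("parallel.h5", H5F_ACC_RDWR, fapl);
    if (file >= 0)
        H5Fclose(file);

    H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}
```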
- - - Fixed a bug with the way the Subfiling VFD assigns I/O concentrators - - During a file open operation, the Subfiling VFD determines the topology - of the application and uses that to select a subset of MPI ranks that - I/O will be forwarded to, called I/O concentrators. The code for this - had previously assumed that the parallel job launcher application (e.g., - mpirun, srun, etc.) would distribute MPI ranks sequentially among a node - until all processors on that node have been assigned before going on to - the next node. When the launcher application mapped MPI ranks to nodes - in a different fashion, such as round-robin, this could cause the Subfiling - VFD to incorrectly map MPI ranks as I/O concentrators, leading to missing - subfiles. - - - Fixed performance regression with some compound type conversions - - In-place type conversion was introduced for most use cases in 1.14.2. - While being able to use the read buffer for type conversion potentially - improves performance by performing the entire I/O at once, it also - disables the optimized compound type conversion used when the destination - is a subset of the source. Disabled in-place type conversion when using - this optimized conversion and there is no benefit in terms of the I/O - size. - - - Fixed an assertion in a previous fix for CVE-2016-4332 - - An assert could fail when processing corrupt files that have invalid - shared message flags (as in CVE-2016-4332). - - The assert statement in question has been replaced with pointer checks - that don't raise errors. Since the function is in cleanup code, we do - our best to close and free things, even when presented with partially - initialized structs. - - Fixes CVE-2016-4332 and HDFFV-9950 (confirmed via the cve_hdf5 repo) - - - Fixed a file space allocation bug in the parallel library for chunked - datasets - - With the addition of support for incremental file space allocation for - chunked datasets with filters applied to them that are created/accessed - in parallel, a bug was introduced to the library's parallel file space - allocation code. This could cause file space to not be allocated correctly - for datasets without filters applied to them that are created with serial - file access and later opened with parallel file access. In turn, this could - cause parallel writes to those datasets to place incorrect data in the file. - - - Fixed an assertion failure in Parallel HDF5 when a file can't be created - due to an invalid library version bounds setting - - An assertion failure could occur in H5MF_settle_raw_data_fsm when a file - can't be created with Parallel HDF5 due to specifying the use of a paged, - persistent file free space manager - (H5Pset_file_space_strategy(..., H5F_FSPACE_STRATEGY_PAGE, 1, ...)) with - an invalid library version bounds combination - (H5Pset_libver_bounds(..., H5F_LIBVER_EARLIEST, H5F_LIBVER_V18)). This - has now been fixed. - - - Fixed bugs in selection I/O - - Previously, the library could fail in some cases when performing selection - I/O with type conversion. - - - Fixed CVE-2018-13867 - - A corrupt file containing an invalid local heap datablock address - could trigger an assert failure when the metadata cache attempted - to load the datablock from storage. - - The local heap now verifies that the datablock address is valid - when the local heap header information is parsed. - - - Fixed CVE-2018-11202 - - A malformed file could result in chunk index memory leaks. 
Under most - conditions (i.e., when the --enable-using-memchecker option is NOT - used), this would result in a small memory leak and and infinite loop - and abort when shutting down the library. The infinite loop would be - due to the "free list" package not being able to clear its resources - so the library couldn't shut down. When the "using a memory checker" - option is used, the free lists are disabled so there is just a memory - leak with no abort on library shutdown. - - The chunk index resources are now correctly cleaned up when reading - misparsed files and valgrind confirms no memory leaks. - - - Fixed an issue where an assert statement was converted to an - incorrect error check statement - - An assert statement in the library dealing with undefined dataset data - fill values was converted to an improper error check that would always - trigger when a dataset's fill value was set to NULL (undefined). This - has now been fixed. - - - Fixed an assertion failure when attempting to use the Subfiling IOC - VFD directly - - The Subfiling feature makes use of two Virtual File Drivers, the - Subfiling VFD and the IOC (I/O Concentrator) VFD. The two VFDs are - intended to be stacked together such that the Subfiling VFD sits - "on top" of the IOC VFD and routes I/O requests through it; using the - IOC VFD alone is currently unsupported. The IOC VFD has been fixed so - that an error message is displayed in this situation rather than causing - an assertion failure. - - - Fixed a potential bug when copying empty enum datatypes - - Copying an empty enum datatype (including implicitly, as when an enum - is a part of a compound datatype) would fail in an assert in debug - mode and could fail in release mode depending on how the platform - handles undefined behavior regarding size 0 memory allocations and - using memcpy with a NULL src pointer. - - The library is now more careful about using memory operations when - copying empty enum datatypes and will not error or raise an assert. - - - Added an AAPL check to H5Acreate - - A check was added to H5Acreate to ensure that a failure is correctly - returned when an invalid Attribute Access Property List is passed - in to the function. The HDF5 API tests were failing for certain - build types due to this condition not being checked previously. - - - Fixed a bug in H5Ocopy that could generate invalid HDF5 files - - H5Ocopy was missing a check to determine whether the new object's - object header version is greater than version 1. Without this check, - copying of objects with object headers that are smaller than a - certain size would cause H5Ocopy to create an object header for the - new object that has a gap in the header data. According to the - HDF5 File Format Specification, this is not allowed for version - 1 of the object header format. - - Fixes GitHub issue #2653 - - - Fixed H5Pget_vol_cap_flags and H5Pget_vol_id to accept H5P_DEFAULT - - H5Pget_vol_cap_flags and H5Pget_vol_id were updated to correctly - accept H5P_DEFAULT for the 'plist_id' FAPL parameter. Previously, - they would fail if provided with H5P_DEFAULT as the FAPL. - - - Fixed ROS3 VFD anonymous credential usage with h5dump and h5ls - - ROS3 VFD anonymous credential functionality became broken in h5dump - and h5ls in the HDF5 1.14.0 release with the added support for VFD - plugins, which changed the way that the tools handled setting of - credential information that the VFD uses. 
The tools could be - provided the command-line option of "--s3-cred=(,,)" as a workaround - for anonymous credential usage, but the documentation for this - option stated that anonymous credentials could be used by simply - omitting the option. The latter functionality has been restored. - - Fixes GitHub issue #2406 - - - Fixed memory leaks when processing malformed object header continuation messages - - Malformed object header continuation messages can result in a too-small - buffer being passed to the decode function, which could lead to reading - past the end of the buffer. Additionally, errors in processing these - malformed messages can lead to allocated memory not being cleaned up. - - This fix adds bounds checking and cleanup code to the object header - continuation message processing. - - Fixes GitHub issue #2604 - - - Fixed memory leaks, aborts, and overflows in H5O EFL decode - - The external file list code could call assert(), read past buffer - boundaries, and not properly clean up resources when parsing malformed - external data files messages. - - This fix cleans up allocated memory, adds buffer bounds checks, and - converts asserts to HDF5 error checking. - - Fixes GitHub issue #2605 - - - Fixed potential heap buffer overflow in decoding of link info message - - Checks for buffer overflow were added for decoding the version, index - flags, link creation order value, and the next three addresses. These - checks remove the potential for an invalid read of any of these - values that could be triggered by a malformed file. - - Fixes GitHub issue #2603 - - - Memory leak - - A memory leak was detected when running h5dump with "pov". The memory was allocated - via H5FL__malloc() in hdf5/src/H5FL.c - - The fuzzed file "pov" was an HDF5 file containing an illegal continuation message. - When deserializing the object header chunks for the file, memory is allocated for the - array of continuation messages (cont_msg_info->msgs) in the continuation message info struct. - As an error is encountered in loading the illegal message, the memory allocated for - cont_msg_info->msgs needs to be freed. - - Fixes GitHub issue #2599 - - - Fixed memory leaks that could occur when reading a dataset from a - malformed file - - When attempting to read layout, pline, and efl information for a - dataset, memory leaks could occur if attempting to read pline/efl - information threw an error, because the memory that was - allocated for pline and efl was not properly cleaned up on error. - - Fixes GitHub issue #2602 - - - Fixed potential heap buffer overrun in group info header decoding from malformed file - - H5O__ginfo_decode could sometimes read past allocated memory when parsing a - group info message from the header of a malformed file. - - It now checks buffer size before each read to properly throw an error in these cases. - - Fixes GitHub issue #2601 - - - Fixed potential buffer overrun issues in some object header decode routines - - Several checks were added to H5O__layout_decode and H5O__sdspace_decode to - ensure that memory buffers don't get overrun when decoding buffers read from - a (possibly corrupted) HDF5 file. - - - Fixed a heap buffer overflow that occurs when reading from - a dataset with a compact layout within a malformed HDF5 file - - During opening of a dataset that has a compact layout, the - library allocates a buffer that stores the dataset's raw data.
The dataset's object header that gets written to the file - contains information about how large a buffer the library - should allocate. If this object header is malformed such that - it causes the library to allocate a buffer that is too small - to hold the dataset's raw data, future I/O to the dataset can - result in heap buffer overflows. To fix this issue, an extra - check is now performed for compact datasets to ensure that - the size of the allocated buffer matches the expected size - of the dataset's raw data (as calculated from the dataset's - dataspace and datatype information). If the two sizes do not - match, opening of the dataset will fail. - - Fixes GitHub issue #2606 - - - Fixed a memory corruption issue that can occur when reading - from a dataset using a hyperslab selection in the file - dataspace and a point selection in the memory dataspace - - When reading from a dataset using a hyperslab selection in - the dataset's file dataspace and a point selection in the - dataset's memory dataspace where the file dataspace's "rank" - is greater than the memory dataspace's "rank", memory corruption - could occur due to an incorrect number of selection points - being copied when projecting the point selection onto the - hyperslab selection's dataspace. - - - Fixed issues in the Subfiling VFD when using the SELECT_IOC_EVERY_NTH_RANK - or SELECT_IOC_TOTAL I/O concentrator selection strategies - - Multiple bugs involving these I/O concentrator selection strategies - were fixed, including: - - * A bug that caused the selection strategy to be altered when - criteria for the strategy was specified in the - H5FD_SUBFILING_IOC_SELECTION_CRITERIA environment variable as - a single value, rather than in the old and undocumented - 'integer:integer' format - * Two bugs which caused a request for 'N' I/O concentrators to - result in 'N - 1' I/O concentrators being assigned, which also - led to issues if only 1 I/O concentrator was requested - - Also added a regression test for these two I/O concentrator selection - strategies to prevent future issues. - - - Fix CVE-2021-37501 / GHSA-rfgw-5vq3-wrjf - - Check for overflow when calculating on-disk attribute data size. - - A bogus HDF5 file may contain dataspace messages with sizes - that cause the on-disk data size to exceed what is addressable. - When calculating the size, make sure the multiplication does not - overflow. - The test case was crafted in a way that the overflow caused the - size to be 0. - - Fixes GitHub #2458 - - - Fixed an issue with collective metadata writes of global heap data - - New test failures in parallel netCDF started occurring with debug - builds of HDF5 due to an assertion failure and this was reported in - GitHub issue #2433. The assertion failure began happening after the - collective metadata write pathway in the library was updated to use - vector I/O so that parallel-enabled HDF5 Virtual File Drivers (other - than the existing MPI I/O VFD) can support collective metadata writes. - - The assertion failure was fixed by updating collective metadata writes - to treat global heap metadata as raw data, as done elsewhere in the - library. - - Fixes GitHub issue #2433 - - - Fixed buffer overflow error in image decoding function. - - The error occurred in the function for decoding an address from the specified - buffer, which is called many times from the function responsible for image - decoding.
The length of the buffer is known in the image decoding function, - but no checks are produced, so the buffer overflow can occur in many places, - including callee functions for address decoding. - - The error was fixed by inserting corresponding checks for buffer overflow. - - Fixes GitHub issue #2432 - - - Reading a H5std_string (std::string) via a C++ DataSet previously - truncated the string at the first null byte as if reading a C string. - Fixed length datasets are now read into H5std_string as a fixed length - string of the appropriate size. Variable length datasets will still be - truncated at the first null byte. - - Fixes Github issue #3034 - - - Fixed write buffer overflow in H5O__alloc_chunk - - The overflow was found by OSS-Fuzz https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=58658 - - - Fixed a segfault when using a user-defined conversion function between compound datatypes - - During type info initialization for compound datatype conversion, the library checked if the - datatypes are subsets of one another in order to perform special conversion handling. - This check uses information that is only defined if a library conversion function is in use. - The library now skips this check for user-defined conversion functions. + - - Fixes Github issue #3840 Java Library ------------ - - Fixed switch case 'L' block missing a break statement. - - The HDF5Array.arrayify method is missing a break statement in the case 'L': section - which causes it to fall through and throw an HDF5JavaException when attempting to - read an Array[Array[Long]]. - - The error was fixed by inserting a break statement at the end of the case 'L': sections. - - Fixes GitHub issue #3056 + - Configuration @@ -1810,230 +148,10 @@ Bug Fixes since HDF5-1.14.0 release Fixes GitHub issue #4811 - - Fixed usage issue with FindZLIB.cmake module - - When building HDF5 with CMake and relying on the FindZLIB.cmake module, - the Find module would correctly find the ZLIB library but not set an OUTPUT_NAME - on the target. Also, the target returned, ZLIB::ZLIB, was not in the ZLIB_LIBRARIES - variable. This caused issues when requesting the OUTPUT_NAME of the target in - the pkg-config settings. - - Similar to HDF5_USE_LIBAEC_STATIC, "Find static AEC library", option, we added - a new option, HDF5_USE_ZLIB_STATIC, "Find static zlib library". These options - allow a user to specify whether to use a static or shared version of the compression - library in a find_package call. - - - Corrected usage of FetchContent in the HDFLibMacros.cmake file. - - CMake version 3.30 changed the behavior of the FetchContent module to deprecate - the use of FetchContent_Populate() in favor of FetchContent_MakeAvailable(). Therefore, - the copying of HDF specialized CMakeLists.txt files to the dependent project's source - was implemented in the FetchContent_Declare() call. - - - Fixed/reverted an Autotools configure hack that causes problems on MacOS - - A sed line in configure.ac was added in the past to paper over some - problems with older versions of the Autotools that would add incorrect - linker flags. This used the -i option in a way that caused silent - errors on MacOS that did not break the build. - - The original fix for this problem (in 1.14.4) removed the sed line - entirely, but it turns out that the sed cleanup is still necessary - on some systems, where empty -l options will be added to the libtool - script. - - This sed line has been restored and reworked to not use -i. 
- - Fixes GitHub issues #3843 and #4448 - - - Fixed a list index out of range issue in the runTest.cmake file - - Fixed an issue in config/cmake/runTest.cmake where the CMake logic - would try to access an invalid list index if the number of lines in - a test's output and reference files don't match - - - Fix Autotools -Werror cleanup - - The Autotools temporarily scrub -Werror(=whatever) from CFLAGS, etc. - so configure checks don't trip over warnings generated by configure - check programs. The sed line originally only scrubbed -Werror but not - -Werror=something, which would cause errors when the '=something' was - left behind in CFLAGS. - - The sed line has been updated to handle -Werror=something lines. - - Fixes one issue raised in #3872 - - - Changed default of 'Error on HDF5 doxygen warnings' DOXYGEN_WARN_AS_ERROR option. - - The default setting of DOXYGEN_WARN_AS_ERROR to 'FAIL_ON_WARNINGS' has been changed - to 'NO'. It was decided that the setting was too aggressive and should be a user choice. - The github actions and scripts have been updated to reflect this. - - * HDF5_ENABLE_DOXY_WARNINGS: ON/OFF (Default: OFF) - * --enable-doxygen-errors: enable/disable (Default: disable) - - - Fixed an issue where the h5tools_test_utils test program was being - installed on the system for Autotools builds of HDF5 - - The h5tools_test_utils test program was mistakenly added to bin_PROGRAMS - in its Makefile.am configuration file, causing the executable to be - installed on the system. The executable is now added to noinst_PROGRAMS - instead and will no longer be installed on the system for Autotools builds - of HDF5. The CMake configuration code already avoids installing the - executable on the system. - - - Fixed a configuration issue that prevented building of the Subfiling VFD on macOS - - Checks were added to the CMake and Autotools code to verify that CLOCK_MONOTONIC_COARSE, - PTHREAD_MUTEX_ADAPTIVE_NP and pthread_condattr_setclock() are available before attempting - to use them in Subfiling VFD-related utility code. Without these checks, attempting - to build the Subfiling VFD on macOS would fail. - - - Fixes the ordering of INCLUDES when building with CMake - - Include directories in the source or build tree should come before other - directories to prioritize headers in the sources over installed ones. - - Fixes GitHub #1027 - - - The accum test now passes on macOS 12+ (Monterey) w/ CMake - - Due to changes in the way macOS handles LD_LIBRARY_PATH, the accum test - started failing on macOS 12+ when building with CMake. CMake has been - updated to set DYLD_LIBRARY_PATH on macOS and the test now passes. - - Fixes GitHub #2994, #2261, and #1289 - - - Changed the default settings used by CMake for the GZIP filter - - The default for the option HDF5_ENABLE_Z_LIB_SUPPORT was OFF. Now the default is ON. - This was done to match the defaults used by the autotools configure.ac. - In addition, the CMake message level for not finding a suitable filter library was - changed from FATAL_ERROR (which would halt the build process) to WARNING (which - will print a message to stderr). Associated files and documentation were changed to match. - - In addition, the default settings in the config/cmake/cacheinit.cmake file were changed to - allow CMake to disable building the filters if the tgz file could not be found. The option - to allow CMake to download the file from the original Github location requires setting - the ZLIB_USE_LOCALCONTENT option to OFF for gzip. 
Similarly, the LIBAEC_USE_LOCALCONTENT - option must be set to OFF for libaec (szip). - - Fixes GitHub issue #2926 - - - Fixed syntax of generator expressions used by CMake - - Adding quotes around the generator expression allows CMake to - correctly parse the expression. Generator expressions are typically - parsed after command arguments. If a generator expression contains - spaces, new lines, semicolons or other characters that may be - interpreted as command argument separators, the whole expression - should be surrounded by quotes when passed to a command. Failure to - do so may result in the expression being split and it may no longer - be recognized as a generator expression. - - Fixes GitHub issue #2906 - - - Fixed improper include of Subfiling VFD build directory - - With the release of the Subfiling Virtual File Driver feature, compiler - flags were added to the Autotools build's CPPFLAGS and AM_CPPFLAGS - variables to always include the Subfiling VFD source code directory, - regardless of whether the VFD is enabled and built or not. These flags - are needed because the header files for the VFD contain macros that are - assumed to always be available, such as H5FD_SUBFILING_NAME, so the - header files are unconditionally included in the HDF5 library. However, - these flags are only needed when building HDF5, so they belong in the - H5_CPPFLAGS variable instead. Inclusion in the CPPFLAGS and AM_CPPFLAGS - variables would export these flags to the h5cc and h5c++ wrapper scripts, - as well as the libhdf5.settings file, which would break builds of software - that use HDF5 and try to use or parse information out of these files after - deleting temporary HDF5 build directories. - - Fixes GitHub issue #2621 - - - Correct the CMake generated pkg-config file - - The pkg-config file generated by CMake had the order and placement of the - libraries wrong. Also added support for debug library names. - - Changed the order of Libs.private libraries so that dependencies come after - dependents. Did not move the compression libraries into Requires.private - because there was not a way to determine whether the compression libraries - provided pkg-config files. We still recommend that the CMake config file method - be used for building projects with CMake. - - Fixes GitHub issues #1546 and #2259 - - - Force lowercase Fortran module file names - - The Cray Fortran compiler uses uppercase Fortran module file names, which - caused CMake installs to fail. A compiler option was added to use lowercase - instead. - Tools ----- - - Fixed several issues in ph5diff - - The parallel logic for the ph5diff tool inside the shared h5diff code was - refactored and cleaned up to fix several issues with the ph5diff tool.
This - fixed: - - - several concurrency issues in ph5diff that can result in interleaved - output - - an issue where output can sometimes be dropped when it ends up in - ph5diff's output overflow file - - an issue where MPI_Init was called after HDF5 had been initialized, - preventing the library from setting up an MPI communicator attribute - to perform library cleanup on MPI_Finalize - - - Renamed h5fuse.sh to h5fuse - - Addresses Discussion #3791 - - - Fixed an issue with unmatched MPI messages in ph5diff - - The "manager" MPI rank in ph5diff was unintentionally sending "program end" - messages to its workers twice, leading to an error from MPICH similar to the - following: - - Abort(810645519) on node 1 (rank 1 in comm 0): Fatal error in internal_Finalize: Other MPI error, error stack: - internal_Finalize(50)...........: MPI_Finalize failed - MPII_Finalize(394)..............: - MPIR_Comm_delete_internal(1224).: Communicator (handle=44000000) being freed has 1 unmatched message(s) - MPIR_Comm_release_always(1250)..: - MPIR_finalize_builtin_comms(154): - - - Fixed an issue in h5repack for variable-length typed datasets - - When repacking datasets into a new file, h5repack tries to determine whether - it can use H5Ocopy to copy each dataset into the new file, or if it needs to - manually re-create the dataset, then read data from the old dataset and write - it to the new dataset. H5repack was previously using H5Ocopy for datasets with - variable-length datatypes, but this can be problematic if the global heap - addresses involved do not match exactly between the old and new files. These - addresses could change for a variety of reasons, such as the command-line options - provided to h5repack, how h5repack allocate space in the repacked file, etc. - Since H5Ocopy does not currently perform any translation when these addresses - change, datasets that were repacked with H5Ocopy could become unreadable in the - new file. H5repack has been fixed to repack variable-length typed datasets without - using H5Ocopy to ensure that the new datasets always have the correct global heap - addresses. - - - Names of objects with square brackets will have trouble without the - special argument, --no-compact-subset, on the h5dump command line. - - h5diff did not have this option and now it has been added. - - Fixes GitHub issue #2682 - - - In the tools traverse function - an error in either visit call - will bypass the cleanup of the local data variables. - - Replaced the H5TOOLS_GOTO_ERROR with just H5TOOLS_ERROR. - - Fixes GitHub issue #2598 + - Performance @@ -2043,25 +161,12 @@ Bug Fixes since HDF5-1.14.0 release Fortran API ----------- - - Fixed: HDF5 fails to compile with -Werror=lto-type-mismatch - - Removed the use of the offending C stub wrapper. - - Fixes GitHub issue #3987 + - High-Level Library ------------------ - - Fixed a memory leak in H5LTopen_file_image with H5LT_FILE_IMAGE_DONT_COPY flag - - When the H5LT_FILE_IMAGE_DONT_COPY flag is passed to H5LTopen_file_image, the - internally-allocated udata structure gets leaked as the core file driver doesn't - have a way to determine when or if it needs to call the "udata_free" callback. - This has been fixed by freeing the udata structure when the "image_free" callback - gets made during file close, where the file is holding the last reference to the - udata structure. 
- - Fixes GitHub issue #827 + - Fortran High-Level APIs @@ -2086,17 +191,6 @@ Bug Fixes since HDF5-1.14.0 release Testing ------- - - Fixed a bug in the dt_arith test when H5_WANT_DCONV_EXCEPTION is not - defined - - The dt_arith test program's test_particular_fp_integer sub-test tries - to ensure that the library correctly raises a datatype conversion - exception when converting a floating-point value to an integer overflows. - However, this test would run even when H5_WANT_DCONV_EXCEPTION isn't - defined, causing the test to fail due to the library not raising - datatype conversion exceptions. This has now been fixed by not running - the test when H5_WANT_DCONV_EXCEPTION is not defined. - - Disabled running of MPI Atomicity tests for OpenMPI major versions < 5 Support for MPI atomicity operations is not implemented for major @@ -2106,142 +200,106 @@ Bug Fixes since HDF5-1.14.0 release skip running the atomicity tests if the major version of OpenMPI is < 5. - - Fixed a testing failure in testphdf5 on Cray machines - - On some Cray machines, what appears to be a bug in Cray MPICH was causing - calls to H5Fis_accessible to create a 0-byte file with strange Unix - permissions. This was causing an H5Fdelete file deletion test in the - testphdf5 program to fail due to a just-deleted HDF5 file appearing to - still be accessible on the file system. The issue in Cray MPICH has been - worked around for the time being by resetting the MPI_Info object on the - File Access Property List used to MPI_INFO_NULL before passing it to the - H5Fis_accessible call. - - - A bug was fixed in the HDF5 API test random datatype generation code - - A bug in the random datatype generation code could cause test failures - when trying to generate an enumeration datatype that has duplicated - name/value pairs in it. This has now been fixed. - - - A bug was fixed in the HDF5 API test VOL connector registration checking code - - The HDF5 API test code checks to see if the VOL connector specified by the - HDF5_VOL_CONNECTOR environment variable (if any) is registered with the library - before attempting to run tests with it so that testing can be skipped and an - error can be returned when a VOL connector fails to register successfully. - Previously, this code didn't account for VOL connectors that specify extra - configuration information in the HDF5_VOL_CONNECTOR environment variable and - would incorrectly report that the specified VOL connector isn't registered - due to including the configuration information as part of the VOL connector - name being checked for registration status. This has now been fixed. - - - Fixed Fortran 2003 test with gfortran-v13, optimization levels O2,O3 - - Fixes failing Fortran 2003 test with gfortran, optimization level O2,O3 - with -fdefault-real-16. Fixes GH #2928. - Platforms Tested =================== - - HDF5 supports the latest macOS versions, including the current and two - preceding releases. As new major macOS versions become available, HDF5 - will discontinue support for the oldest version and add the latest - version to its list of compatible systems, along with the previous two - releases. - - Linux 5.16.14-200.fc35 GNU gcc (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) - #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9) - Fedora35 clang version 13.0.0 (Fedora 13.0.0-3.fc35) + - HDF5 is tested with the two latest macOS versions that are available + on github runners. 
As new major macOS versions become available, HDF5 + will discontinue support for the older version and add the new latest + version to its list of compatible systems, along with the previous + version. + + Linux 6.8.0-1010-aws GNU gcc, gfortran, g++ + #10-Ubuntu SMP 2024 x86_64 (Ubuntu 13.2.0-23ubuntu4) 13.2.0 + GNU/Linux Ubuntu 24.04 Ubuntu clang version 18.1.3 (1ubuntu1) + Intel(R) oneAPI DPC++/C++ Compiler 2024.2.0 + ifx (IFX) 2024.2.0 20240602 (cmake and autotools) - Linux 5.19.0-1027-aws GNU gcc (GCC) 11.3.0-1ubuntu1 - #36-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.3.0-1ubuntu1 - Ubuntu 22.04 Intel oneAPI DPC++/C++ Compiler, IFX 2023.1.0 - Ubuntu clang version 14.0.0-1ubuntu1 + Linux 6.5.0-1018-aws GNU gcc, gfortran, g++ + #18-Ubuntu SMP x86_64 GNU/Linux (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 + Ubuntu 22.04 Ubuntu clang version 14.0.0-1ubuntu1 + Intel(R) oneAPI DPC++/C++ Compiler 2024.0.2 + ifx (IFX) 2024.0.2 20231213 (cmake and autotools) - Linux 5.15.0-1037-aws GNU gcc (GCC) 9.4.0-1ubuntu1 - #36-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 9.4.0-1ubuntu1 - Ubuntu 20.04 Intel oneAPI DPC++/C++ Compiler, IFX 2023.1.0 - Ubuntu clang version 10.0.0-4ubuntu1 + Linux 5.14.21-cray_shasta_c cray-mpich/8.1.28 + #1 SMP x86_64 GNU/Linux cce/15.0.0 + (frontier) gcc/13.2 + (cmake) + + Linux 5.14.0-427.24.1.el9_4 GNU gcc, gfortran, g++ (Red Hat 11.4.1-3) + #1 SMP x86_64 GNU/Linux clang version 17.0.6 + Rocky 9 Intel(R) oneAPI DPC++/C++ Compiler 2024.2.0 + ifx (IFX) 2024.2.0 (cmake and autotools) - Linux 5.14.21-cray_shasta_c cray-mpich/8.1.25 - #1 SMP x86_64 GNU/Linux cce 15.0.1 - (perlmutter) GCC 12.2.0 - intel-oneapi/2023.1.0 - nvidia/22.7 + Linux-4.18.0-553.16.1.1toss.t4 openmpi/4.1.2 + #1 SMP x86_64 GNU/Linux clang 14.0.6 + (corona, dane) GCC 12.1.1 + Intel(R) oneAPI DPC++/C++ Compiler 2023.2.1 + ifx (IFX) 2023.2.1 + + Linux-4.18.0-553.5.1.1toss.t4 openmpi/4.1/4.1.6 + #1 SMP x86_64 GNU/Linux clang 16.0.6 + (eclipse) GCC 12.3.0 + Intel(R) oneAPI DPC++/C++ Compiler 2024.0.2 + ifx (IFX) 2024.0.2 (cmake) - Linux 5.14.21-cray_shasta_c cray-mpich/8.1.23 - #1 SMP x86_64 GNU/Linux cce 15.0.1 - (crusher) GCC 12.2.0 + Linux 4.14.0-115.35.1.3chaos spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 17.0.6 + (vortex) GCC 12.2.1 + nvhpc 24.1 + XL 2023.06.28 (cmake) - Linux-4.14.0-115.21.2 spectrum-mpi/rolling-release - #1 SMP ppc64le GNU/Linux clang 12.0.1, 14.0.5 + Linux-4.14.0-115.35.1 spectrum-mpi/rolling-release + #1 SMP ppc64le GNU/Linux clang 14.0.5, 15.0.6 (lassen) GCC 8.3.1 - XL 16.1.1.2, 2021,09.22, 2022.08.05 + XL 2021.09.22, 2022.08.05 (cmake) - Linux-4.12.14-197.99-default cray-mpich/7.7.14 - #1 SMP x86_64 GNU/Linux cce 12.0.3 - (theta) GCC 11.2.0 - llvm 9.0 - Intel 19.1.2 - Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39) - IBM XL C for Linux, V13.1 - IBM XL Fortran for Linux, V15.1 - Linux 3.10.0-1160.24.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) + Linux 3.10.0-1160.80.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++) #1 SMP x86_64 GNU/Linux compilers: Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4) - (jelly/kituo/moohan) Version 4.9.3, Version 5.3.0, Version 6.3.0, - Version 7.2.0, Version 8.3.0, Version 9.1.0 - Version 10.2.0 + (jelly/kituo/moohan) Version 4.9.3, Version 7.2.0, Version 8.3.0, + Version 9.1.0, Version 10.2.0 Intel(R) C (icc), C++ (icpc), Fortran (icc) compilers: Version 17.0.0.098 Build 
20160721 GNU C (gcc) and C++ (g++) 4.8.5 compilers - with NAG Fortran Compiler Release 6.1(Tozai) + with NAG Fortran Compiler Release 7.1(Hanzomon) Intel(R) C (icc) and C++ (icpc) 17.0.0.098 compilers - with NAG Fortran Compiler Release 6.1(Tozai) + with NAG Fortran Compiler Release 7.1(Hanzomon) + MPICH 3.1.4 compiled with GCC 4.9.3 MPICH 3.3 compiled with GCC 7.2.0 - MPICH 4.0.3 compiled with GCC 7.2.0 - OpenMPI 3.1.3 compiled with GCC 7.2.0 - OpenMPI 4.1.2 compiled with GCC 9.1.0 + OpenMPI 3.1.3 compiled with GCC 7.2.0 and 4.1.2 + compiled with GCC 9.1.0 PGI C, Fortran, C++ for 64-bit target on x86_64; - Version 19.10-0 - NVIDIA C, Fortran, C++ for 64-bit target on - x86_64; - Version 22.5-0 + Versions 18.4.0 and 19.10-0 + NVIDIA nvc, nvfortran and nvc++ version 22.5-0 (autotools and cmake) - Linux-3.10.0-1160.0.0.1chaos openmpi-4.1.2 - #1 SMP x86_64 GNU/Linux clang 6.0.0, 11.0.1 - (quartz) GCC 7.3.0, 8.1.0 - Intel 19.0.4, 2022.2, oneapi.2022.2 - macOS Apple M1 11.6 Apple clang version 12.0.5 (clang-1205.0.22.11) - Darwin 20.6.0 arm64 gfortran GNU Fortran (Homebrew GCC 11.2.0) 11.1.0 - (macmini-m1) Intel icc/icpc/ifort version 2021.3.0 202106092021.3.0 20210609 - - macOS Big Sur 11.3.1 Apple clang version 12.0.5 (clang-1205.0.22.9) - Darwin 20.4.0 x86_64 gfortran GNU Fortran (Homebrew GCC 10.2.0_3) 10.2.0 - (bigsur-1) Intel icc/icpc/ifort version 2021.2.0 20210228 - - macOS High Sierra 10.13.6 Apple LLVM version 10.0.0 (clang-1000.10.44.4) - 64-bit gfortran GNU Fortran (GCC) 6.3.0 - (bear) Intel icc/icpc/ifort version 19.0.4.233 20190416 + Linux-3.10.0-1160.119.1.1chaos openmpi/4.1.4 + #1 SMP x86_64 GNU/Linux clang 16.0.6 + (skybridge) Intel(R) oneAPI DPC++/C++ Compiler 2023.2.0 + ifx (IFX) 2023.2.0 + (cmake) - Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3 - 64-bit gfortran GNU Fortran (GCC) 5.2.0 - (osx1011test) Intel icc/icpc/ifort version 16.0.2 + Linux-3.10.0-1160.90.1.1chaos openmpi/4.1 + #1 SMP x86_64 GNU/Linux clang 16.0.6 + (attaway) GCC 12.1.0 + Intel(R) oneAPI DPC++/C++ Compiler 2024.0.2 + ifx (IFX) 2024.0.2 + (cmake) Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++) #1 SMP x86_64 GNU/Linux compilers: @@ -2255,9 +313,9 @@ Platforms Tested Windows 10 x64 Visual Studio 2019 w/ clang 12.0.0 with MSVC-like command-line (C/C++ only - cmake) Visual Studio 2019 w/ Intel (C/C++ only - cmake) - Visual Studio 2022 w/ clang 15.0.1 + Visual Studio 2022 w/ clang 17.0.3 with MSVC-like command-line (C/C++ only - cmake) - Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2023 (cmake) + Visual Studio 2022 w/ Intel C/C++ oneAPI 2023 (cmake) Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) diff --git a/release_docs/RELEASE_PROCESS.md b/release_docs/RELEASE_PROCESS.md index c3e106d267a..fd21bfb4d4b 100644 --- a/release_docs/RELEASE_PROCESS.md +++ b/release_docs/RELEASE_PROCESS.md @@ -182,13 +182,19 @@ For more information on the HDF5 versioning and backward and forward compatibili 3. Run `bin/release` (similar to 8.2) and commit all the changed files. 4. Select release build from workflow. - Choose the release branch - - Change ‘Release version tag’ name to 'hdf5_X.Y.Z' + - Change ‘Release version tag’ name to 'hdf5_X.Y.Z' - Press "Run Workflow" 5. Review the release files in Github 6. Edit the Github Release and change status to Release - Change status from Pre-release to Release +7. Select publish-release build from workflow. 
+ - Choose the release branch + - Change ‘HDF5 Release version tag’ name to 'hdf5_X.Y.Z' + - Change 'HDF5 Release file name base' to 'hdf5-X.Y.Z' + - Change 'HDF5 target bucket directory' to 'vX_Y/vX_Y_Z' + - Press "Run Workflow" -### 10. Add the contents of the RELEASE.txt file in the release code to the HISTORY file in the **support** branch, just below the introductory lines at the top of the HISTORY file. +### 10. Add the contents of the RELEASE.txt file in the release code to the HISTORY- file in the **support** branch, just below the introductory lines at the top of the HISTORY file. ### 11. Conduct Release Retrospective (Release Manager) 1. Schedule time and solicit comments from retrospective @@ -207,5 +213,5 @@ For more information on the HDF5 versioning and backward and forward compatibili [u11]: https://github.com/HDFGroup/hdf5/blob/develop/src/CMakeLists.txt [u12]: https://github.com/HDFGroup/hdf5/blob/develop/configure.ac [u13]: https://hdfgroup.github.io/hdf5/develop/api-compat-macros.html -[u14]: https://github.com/HDFGroup/hdf5/releases/tag/snapshot-1.14 +[u14]: https://github.com/HDFGroup/hdf5/releases/tag/snapshot-1.16 [u15]: https://github.com/HDFGroup/hdf5/releases/tag/snapshot diff --git a/release_docs/USING_CMake_Examples.txt b/release_docs/USING_CMake_Examples.txt index 31bb4dc92c4..956f9ec16d7 100644 --- a/release_docs/USING_CMake_Examples.txt +++ b/release_docs/USING_CMake_Examples.txt @@ -21,7 +21,7 @@ I. Preconditions ======================================================================== 1. We suggest you obtain the latest CMake for your platform from the Kitware - web site. The HDF5 1.15.x product requires a minimum CMake version + web site. The HDF5 1.17.x product requires a minimum CMake version of 3.18. If you are using VS2022, the minimum CMake version is 3.21. 2. You have installed the HDF5 library built with CMake, by executing diff --git a/release_docs/USING_HDF5_CMake.txt b/release_docs/USING_HDF5_CMake.txt index 8b78e7d2db8..8ff240ded25 100644 --- a/release_docs/USING_HDF5_CMake.txt +++ b/release_docs/USING_HDF5_CMake.txt @@ -38,7 +38,7 @@ I. Preconditions ======================================================================== 1. We suggest you obtain the latest CMake for your platform from the Kitware - web site. The HDF5 1.15.x product requires a minimum CMake version + web site. The HDF5 1.17.x product requires a minimum CMake version of 3.18. If you are using VS2022, the minimum CMake version is 3.21. 2. You have installed the HDF5 library built with CMake, by executing @@ -50,24 +50,24 @@ I. Preconditions or environment variable, set(ENV{HDF5_ROOT} "") to the installed location of HDF5. On Windows: - HDF5_ROOT=C:/Program Files/HDF_Group/HDF5/1.15.x/ + HDF5_ROOT=C:/Program Files/HDF_Group/HDF5/1.17.x/ On unix: - HDF5_ROOT=/HDF_Group/HDF5/1.15.x/ + HDF5_ROOT=/HDF_Group/HDF5/1.17.x/ If you are using shared libraries, you may need to add to the path environment variable. Set the path environment variable to the installed location of the library files for HDF5. On Windows (*.dll): - PATH=%PATH%;C:/Program Files/HDF_Group/HDF5/1.15.x/bin + PATH=%PATH%;C:/Program Files/HDF_Group/HDF5/1.17.x/bin On unix (*.so): - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/HDF_Group/HDF5/1.15.x/lib + LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/HDF_Group/HDF5/1.17.x/lib If you are using filter plugin libraries, you will need to set the HDF5_PLUGIN_PATH environment variable. 
On Windows: - HDF5_PLUGIN_PATH=C:/Program Files/HDF_Group/HDF5/1.15.x/lib/plugin + HDF5_PLUGIN_PATH=C:/Program Files/HDF_Group/HDF5/1.17.x/lib/plugin On unix: - HDF5_PLUGIN_PATH=/HDF_Group/HDF5/1.15.x/lib/plugin + HDF5_PLUGIN_PATH=/HDF_Group/HDF5/1.17.x/lib/plugin (Note there are no quote characters used on Windows and all platforms use forward slashes) diff --git a/release_docs/USING_HDF5_VS.txt b/release_docs/USING_HDF5_VS.txt index 6d69a143bd4..f13cdfadc2a 100644 --- a/release_docs/USING_HDF5_VS.txt +++ b/release_docs/USING_HDF5_VS.txt @@ -62,11 +62,11 @@ Using Visual Studio 2008 with HDF5 Libraries built with Visual Studio 2008 and select "x64". 2.2 Find the box "Show directories for", choose "Include files", add the - header path (i.e. c:\Program Files\HDF_Group\HDF5\1.15.x\include) + header path (i.e. c:\Program Files\HDF_Group\HDF5\1.17.x\include) to the included directories. 2.3 Find the box "Show directories for", choose "Library files", add the - library path (i.e. c:\Program Files\HDF_Group\HDF5\1.15.x\lib) + library path (i.e. c:\Program Files\HDF_Group\HDF5\1.17.x\lib) to the library directories. 2.4 If using Fortran libraries, you will also need to setup the path diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 47fc1dcf751..438d60fffc7 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -687,6 +687,8 @@ set (H5VL_SOURCES ${HDF5_SRC_DIR}/H5VLnative_object.c ${HDF5_SRC_DIR}/H5VLnative_token.c ${HDF5_SRC_DIR}/H5VLpassthru.c + ${HDF5_SRC_DIR}/H5VLpassthru_int.c + ${HDF5_SRC_DIR}/H5VLquery.c ${HDF5_SRC_DIR}/H5VLtest.c ) set (H5VL_HDRS diff --git a/src/H5A.c b/src/H5A.c index 10475919fbe..6c9cfd9f50d 100644 --- a/src/H5A.c +++ b/src/H5A.c @@ -125,7 +125,7 @@ H5A__create_common(H5VL_object_t *vol_obj, H5VL_loc_params_t *loc_params, const HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, H5I_INVALID_HID, "unable to create attribute"); /* Register the new attribute and get an ID for it */ - if ((ret_value = H5VL_register(H5I_ATTR, attr, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_ATTR, attr, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register attribute for ID"); done: @@ -267,7 +267,7 @@ H5Acreate_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIui*siiiii", app_file, app_func, app_line, loc_id, attr_name, type_id, space_id, acpl_id, aapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref(ret_value) < 0) @@ -413,7 +413,7 @@ H5Acreate_by_name_async(const char *app_file, const char *app_func, unsigned app /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE12(__func__, "*s*sIui*s*siiiiii", app_file, app_func, app_line, loc_id, obj_name, attr_name, type_id, space_id, acpl_id, aapl_id, lapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref(ret_value) < 0) @@ -454,7 +454,7 @@ H5A__open_common(H5VL_object_t *vol_obj, H5VL_loc_params_t *loc_params, const ch HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open attribute: '%s'", attr_name); /* Register the attribute and get an ID for it */ - if ((ret_value = 
H5VL_register(H5I_ATTR, attr, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_ATTR, attr, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register attribute for ID"); done: @@ -574,7 +574,7 @@ H5Aopen_async(const char *app_file, const char *app_func, unsigned app_line, hid /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIui*sii", app_file, app_func, app_line, loc_id, attr_name, aapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref(ret_value) < 0) @@ -702,7 +702,7 @@ H5Aopen_by_name_async(const char *app_file, const char *app_func, unsigned app_l /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE9(__func__, "*s*sIui*s*siii", app_file, app_func, app_line, loc_id, obj_name, attr_name, aapl_id, lapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref(ret_value) < 0) @@ -839,7 +839,7 @@ H5Aopen_by_idx_async(const char *app_file, const char *app_func, unsigned app_li /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE11(__func__, "*s*sIui*sIiIohiii", app_file, app_func, app_line, loc_id, obj_name, idx_type, order, n, aapl_id, lapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref(ret_value) < 0) @@ -949,7 +949,7 @@ H5Awrite_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIuii*xi", app_file, app_func, app_line, attr_id, dtype_id, buf, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1055,7 +1055,7 @@ H5Aread_async(const char *app_file, const char *app_func, unsigned app_line, hid /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIuii*xi", app_file, app_func, app_line, attr_id, dtype_id, buf, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1678,7 +1678,7 @@ H5Arename_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIui*s*si", app_file, app_func, app_line, loc_id, old_name, new_name, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1792,7 +1792,7 @@ H5Arename_by_name_async(const char *app_file, const char *app_func, unsigned app /* If a token was created, add the token to the event set */ if (NULL != token) /* 
clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE9(__func__, "*s*sIui*s*s*sii", app_file, app_func, app_line, loc_id, obj_name, old_attr_name, new_attr_name, lapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -2214,11 +2214,11 @@ H5Aclose(hid_t attr_id) herr_t H5Aclose_async(const char *app_file, const char *app_func, unsigned app_line, hid_t attr_id, hid_t es_id) { - H5VL_object_t *vol_obj = NULL; /* Object for loc_id */ - H5VL_t *connector = NULL; /* VOL connector */ - void *token = NULL; /* Request token for async operation */ - void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_object_t *vol_obj = NULL; /* Object for loc_id */ + H5VL_connector_t *connector = NULL; /* VOL connector */ + void *token = NULL; /* Request token for async operation */ + void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -2234,7 +2234,7 @@ H5Aclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* Increase connector's refcount, so it doesn't get closed if closing * the attribute closes the file */ - connector = vol_obj->connector; + connector = H5VL_OBJ_CONNECTOR(vol_obj); H5VL_conn_inc_rc(connector); /* Point at token for operation to set up */ @@ -2250,7 +2250,7 @@ H5Aclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, attr_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -2401,7 +2401,7 @@ H5Aexists_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIui*s*bi", app_file, app_func, app_line, obj_id, attr_name, attr_exists, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -2514,7 +2514,7 @@ H5Aexists_by_name_async(const char *app_file, const char *app_func, unsigned app /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE9(__func__, "*s*sIui*s*s*bii", app_file, app_func, app_line, loc_id, obj_name, attr_name, attr_exists, lapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); diff --git a/src/H5Adense.c b/src/H5Adense.c index 52a6244d7be..143fa9b3646 100644 --- a/src/H5Adense.c +++ b/src/H5Adense.c @@ -633,8 +633,8 @@ H5A__dense_write_bt2_cb(void *_record, void *_op_data, bool *changed) udata.found_op_data = NULL; /* Modify record for creation order index */ - if (H5B2_modify(bt2_corder, &udata, H5A__dense_write_bt2_cb2, &op_data->attr->sh_loc.u.heap_id) < - 0) + if 
(H5B2_modify(bt2_corder, &udata, false, H5A__dense_write_bt2_cb2, + &op_data->attr->sh_loc.u.heap_id) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "unable to modify record in v2 B-tree"); } /* end if */ @@ -763,7 +763,7 @@ H5A__dense_write(H5F_t *f, const H5O_ainfo_t *ainfo, H5A_t *attr) op_data.corder_bt2_addr = ainfo->corder_bt2_addr; /* Modify attribute through 'name' tracking v2 B-tree */ - if (H5B2_modify(bt2_name, &udata, H5A__dense_write_bt2_cb, &op_data) < 0) + if (H5B2_modify(bt2_name, &udata, false, H5A__dense_write_bt2_cb, &op_data) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "unable to modify record in v2 B-tree"); done: diff --git a/src/H5Adeprec.c b/src/H5Adeprec.c index a63470a0eca..0d878d34e48 100644 --- a/src/H5Adeprec.c +++ b/src/H5Adeprec.c @@ -138,7 +138,7 @@ H5Acreate1(hid_t loc_id, const char *name, hid_t type_id, hid_t space_id, hid_t HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, H5I_INVALID_HID, "unable to create attribute"); /* Register the new attribute and get an ID for it */ - if ((ret_value = H5VL_register(H5I_ATTR, attr, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_ATTR, attr, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register attribute for ID"); done: @@ -202,7 +202,7 @@ H5Aopen_name(hid_t loc_id, const char *name) HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open attribute"); /* Register the attribute and get an ID for it */ - if ((ret_value = H5VL_register(H5I_ATTR, attr, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_ATTR, attr, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register attribute handle"); done: @@ -269,7 +269,7 @@ H5Aopen_idx(hid_t loc_id, unsigned idx) HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open attribute"); /* Register the attribute and get an ID for it */ - if ((ret_value = H5VL_register(H5I_ATTR, attr, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_ATTR, attr, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register attribute handle"); done: diff --git a/src/H5Aint.c b/src/H5Aint.c index a459402f589..1f1f12437b7 100644 --- a/src/H5Aint.c +++ b/src/H5Aint.c @@ -96,6 +96,7 @@ const unsigned H5O_attr_ver_bounds[] = { H5O_ATTR_VERSION_3, /* H5F_LIBVER_V112 */ H5O_ATTR_VERSION_3, /* H5F_LIBVER_V114 */ H5O_ATTR_VERSION_3, /* H5F_LIBVER_V116 */ + H5O_ATTR_VERSION_3, /* H5F_LIBVER_V118 */ H5O_ATTR_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Apublic.h b/src/H5Apublic.h index 7c28c0a24a2..97d3a204cc2 100644 --- a/src/H5Apublic.h +++ b/src/H5Apublic.h @@ -153,8 +153,8 @@ H5_DLL hid_t H5Acreate_async(const char *app_file, const char *app_func, unsigne const char *attr_name, hid_t type_id, hid_t space_id, hid_t acpl_id, hid_t aapl_id, hid_t es_id); #else -H5_DLL hid_t H5Acreate_async(hid_t loc_id, const char *attr_name, hid_t type_id, hid_t space_id, - hid_t acpl_id, hid_t aapl_id, hid_t es_id); +H5_DLL hid_t H5Acreate_async(hid_t loc_id, const char *attr_name, hid_t type_id, hid_t space_id, + hid_t acpl_id, hid_t aapl_id, hid_t es_id); #endif /*--------------------------------------------------------------------------*/ @@ -769,7 +769,7 @@ H5_DLL hid_t H5Aopen(hid_t obj_id, const char *attr_name, hid_t aapl_id); H5_DLL hid_t H5Aopen_async(const char *app_file, const char *app_func, unsigned app_line, hid_t obj_id, const char 
*attr_name, hid_t aapl_id, hid_t es_id); #else -H5_DLL hid_t H5Aopen_async(hid_t obj_id, const char *attr_name, hid_t aapl_id, hid_t es_id); +H5_DLL hid_t H5Aopen_async(hid_t obj_id, const char *attr_name, hid_t aapl_id, hid_t es_id); #endif /*--------------------------------------------------------------------------*/ /** @@ -824,9 +824,9 @@ H5_DLL hid_t H5Aopen_by_idx_async(const char *app_file, const char *app_func, un const char *obj_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, hid_t aapl_id, hid_t lapl_id, hid_t es_id); #else -H5_DLL hid_t H5Aopen_by_idx_async(hid_t loc_id, const char *obj_name, H5_index_t idx_type, - H5_iter_order_t order, hsize_t n, hid_t aapl_id, hid_t lapl_id, - hid_t es_id); +H5_DLL hid_t H5Aopen_by_idx_async(hid_t loc_id, const char *obj_name, H5_index_t idx_type, + H5_iter_order_t order, hsize_t n, hid_t aapl_id, hid_t lapl_id, + hid_t es_id); #endif /*--------------------------------------------------------------------------*/ /** @@ -879,8 +879,8 @@ H5_DLL hid_t H5Aopen_by_name_async(const char *app_file, const char *app_func, u hid_t loc_id, const char *obj_name, const char *attr_name, hid_t aapl_id, hid_t lapl_id, hid_t es_id); #else -H5_DLL hid_t H5Aopen_by_name_async(hid_t loc_id, const char *obj_name, const char *attr_name, hid_t aapl_id, - hid_t lapl_id, hid_t es_id); +H5_DLL hid_t H5Aopen_by_name_async(hid_t loc_id, const char *obj_name, const char *attr_name, hid_t aapl_id, + hid_t lapl_id, hid_t es_id); #endif /*-------------------------------------------------------------------------- */ diff --git a/src/H5B2.c b/src/H5B2.c index f49689911dc..d3eceff756e 100644 --- a/src/H5B2.c +++ b/src/H5B2.c @@ -1115,7 +1115,7 @@ H5B2_neighbor(H5B2_t *bt2, H5B2_compare_t range, void *udata, H5B2_found_t op, v *------------------------------------------------------------------------- */ herr_t -H5B2_modify(H5B2_t *bt2, void *udata, H5B2_modify_t op, void *op_data) +H5B2_modify(H5B2_t *bt2, void *udata, bool try, H5B2_modify_t op, void *op_data) { H5B2_hdr_t *hdr; /* Pointer to the B-tree header */ H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */ @@ -1142,8 +1142,13 @@ H5B2_modify(H5B2_t *bt2, void *udata, H5B2_modify_t op, void *op_data) curr_node_ptr = hdr->root; /* Check for empty tree */ - if (0 == curr_node_ptr.node_nrec) - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "B-tree has no records"); + if (0 == curr_node_ptr.node_nrec) { + /* Don't fail if the caller set the 'try' flag */ + if (try) + HGOTO_DONE(SUCCEED); + else + HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "B-tree has no records"); + } /* end if */ /* Current depth of the tree */ depth = hdr->depth; @@ -1277,11 +1282,11 @@ H5B2_modify(H5B2_t *bt2, void *udata, H5B2_modify_t op, void *op_data) if (H5AC_unprotect(hdr->f, H5AC_BT2_LEAF, curr_node_ptr.addr, leaf, H5AC__NO_FLAGS_SET) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node"); - /* Note: don't push error on stack, leave that to next higher level, - * since many times the B-tree is searched in order to determine - * if an object exists in the B-tree or not. 
- */ - HGOTO_DONE(FAIL); + /* Don't fail if the caller set the 'try' flag */ + if (try) + HGOTO_DONE(SUCCEED); + else + HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "record not found"); } else { /* Make callback for current record */ diff --git a/src/H5B2private.h b/src/H5B2private.h index f3f1eaf8a65..bf06a8bd846 100644 --- a/src/H5B2private.h +++ b/src/H5B2private.h @@ -131,7 +131,7 @@ H5_DLL herr_t H5B2_iterate(H5B2_t *bt2, H5B2_operator_t op, void *op_data); H5_DLL herr_t H5B2_find(H5B2_t *bt2, void *udata, bool *found, H5B2_found_t op, void *op_data); H5_DLL herr_t H5B2_index(H5B2_t *bt2, H5_iter_order_t order, hsize_t idx, H5B2_found_t op, void *op_data); H5_DLL herr_t H5B2_neighbor(H5B2_t *bt2, H5B2_compare_t range, void *udata, H5B2_found_t op, void *op_data); -H5_DLL herr_t H5B2_modify(H5B2_t *bt2, void *udata, H5B2_modify_t op, void *op_data); +H5_DLL herr_t H5B2_modify(H5B2_t *bt2, void *udata, bool try, H5B2_modify_t op, void *op_data); H5_DLL herr_t H5B2_update(H5B2_t *bt2, void *udata, H5B2_modify_t op, void *op_data); H5_DLL herr_t H5B2_remove(H5B2_t *b2, void *udata, H5B2_remove_t op, void *op_data); H5_DLL herr_t H5B2_remove_by_idx(H5B2_t *bt2, H5_iter_order_t order, hsize_t idx, H5B2_remove_t op, diff --git a/src/H5Bcache.c b/src/H5Bcache.c index 007912053e9..0b1010ba832 100644 --- a/src/H5Bcache.c +++ b/src/H5Bcache.c @@ -179,6 +179,12 @@ H5B__cache_deserialize(const void *_image, size_t len, void *_udata, bool H5_ATT if (bt->nchildren > shared->two_k) HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "number of children is greater than maximum"); + /* Sanity-check the level in case it is corrupted; it is unreasonable for the level to be + larger than the number of children */ + if (bt->level > bt->nchildren) + HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, + "level cannot be greater than the number of children, possibly corrupted"); + /* Sibling pointers */ if (H5_IS_BUFFER_OVERFLOW(image, H5F_sizeof_addr(udata->f), p_end)) HGOTO_ERROR(H5E_BTREE, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); diff --git a/src/H5CX.c b/src/H5CX.c index 643c77f9c31..dbcb49e2e6b 100644 --- a/src/H5CX.c +++ b/src/H5CX.c @@ -931,33 +931,27 @@ H5CX_retrieve_state(H5CX_state_t **api_state) } /* end if */ /* Keep a copy of the VOL connector property, if there is one */ - if ((*head)->ctx.vol_connector_prop_valid && (*head)->ctx.vol_connector_prop.connector_id > 0) { + if ((*head)->ctx.vol_connector_prop_valid && (*head)->ctx.vol_connector_prop.connector) { /* Get the connector property */ H5MM_memcpy(&(*api_state)->vol_connector_prop, &(*head)->ctx.vol_connector_prop, sizeof(H5VL_connector_prop_t)); /* Check for actual VOL connector property */ - if ((*api_state)->vol_connector_prop.connector_id) { + if ((*api_state)->vol_connector_prop.connector) { /* Copy connector info, if it exists */ if ((*api_state)->vol_connector_prop.connector_info) { - H5VL_class_t *connector; /* Pointer to connector */ - void *new_connector_info = NULL; /* Copy of connector info */ - - /* Retrieve the connector for the ID */ - if (NULL == - (connector = (H5VL_class_t *)H5I_object((*api_state)->vol_connector_prop.connector_id))) - HGOTO_ERROR(H5E_CONTEXT, H5E_BADTYPE, FAIL, "not a VOL connector ID"); + void *new_connector_info = NULL; /* Copy of connector info */ /* Allocate and copy connector info */ - if (H5VL_copy_connector_info(connector, &new_connector_info, + if (H5VL_copy_connector_info((*api_state)->vol_connector_prop.connector, &new_connector_info, (*api_state)->vol_connector_prop.connector_info) < 0)
HGOTO_ERROR(H5E_CONTEXT, H5E_CANTCOPY, FAIL, "connector info copy failed"); (*api_state)->vol_connector_prop.connector_info = new_connector_info; } /* end if */ - /* Increment the refcount on the connector ID */ - if (H5I_inc_ref((*api_state)->vol_connector_prop.connector_id, false) < 0) - HGOTO_ERROR(H5E_CONTEXT, H5E_CANTINC, FAIL, "incrementing VOL connector ID failed"); + /* Increment the refcount on the connector */ + if (H5VL_conn_inc_rc((*api_state)->vol_connector_prop.connector) < 0) + HGOTO_ERROR(H5E_CONTEXT, H5E_CANTINC, FAIL, "incrementing VOL connector refcount failed"); } /* end if */ } /* end if */ @@ -1028,7 +1022,7 @@ H5CX_restore_state(const H5CX_state_t *api_state) (*head)->ctx.vol_wrap_ctx_valid = true; /* Restore the VOL connector info */ - if (api_state->vol_connector_prop.connector_id) { + if (api_state->vol_connector_prop.connector) { H5MM_memcpy(&(*head)->ctx.vol_connector_prop, &api_state->vol_connector_prop, sizeof(H5VL_connector_prop_t)); (*head)->ctx.vol_connector_prop_valid = true; @@ -1087,16 +1081,17 @@ H5CX_free_state(H5CX_state_t *api_state) HGOTO_ERROR(H5E_CONTEXT, H5E_CANTDEC, FAIL, "can't decrement refcount on VOL wrapping context"); /* Release the VOL connector property, if it was set */ - if (api_state->vol_connector_prop.connector_id) { + if (api_state->vol_connector_prop.connector) { /* Clean up any VOL connector info */ if (api_state->vol_connector_prop.connector_info) - if (H5VL_free_connector_info(api_state->vol_connector_prop.connector_id, + if (H5VL_free_connector_info(api_state->vol_connector_prop.connector, api_state->vol_connector_prop.connector_info) < 0) HGOTO_ERROR(H5E_CONTEXT, H5E_CANTRELEASE, FAIL, "unable to release VOL connector info object"); - /* Decrement connector ID */ - if (H5I_dec_ref(api_state->vol_connector_prop.connector_id) < 0) - HDONE_ERROR(H5E_CONTEXT, H5E_CANTDEC, FAIL, "can't close VOL connector ID"); + + /* Decrement connector refcount */ + if (H5VL_conn_dec_rc(api_state->vol_connector_prop.connector) < 0) + HDONE_ERROR(H5E_CONTEXT, H5E_CANTDEC, FAIL, "can't close VOL connector"); } /* end if */ /* Free the state */ diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c index 978b1709945..55bfcd28600 100644 --- a/src/H5Cdbg.c +++ b/src/H5Cdbg.c @@ -675,7 +675,7 @@ void H5C_stats__reset(H5C_t *cache_ptr) #else /* NDEBUG */ #if H5C_COLLECT_CACHE_STATS -H5C_stats__reset(H5C_t *cache_ptr) +H5C_stats__reset(H5C_t *cache_ptr) #else /* H5C_COLLECT_CACHE_STATS */ H5C_stats__reset(H5C_t H5_ATTR_UNUSED *cache_ptr) #endif /* H5C_COLLECT_CACHE_STATS */ diff --git a/src/H5Cimage.c b/src/H5Cimage.c index ec1af787d5e..eb89b735f27 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -82,25 +82,23 @@ * ***********************************************************************/ #if H5C_COLLECT_CACHE_STATS -/* clang-format off */ -#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \ -do { \ - (cache_ptr)->images_created++; \ -} while (0) -#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \ -do { \ - /* make sure image len is still good */ \ - assert((cache_ptr)->image_len > 0); \ - (cache_ptr)->images_read++; \ -} while (0) -#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) \ -do { \ - /* make sure image len is still good */ \ - assert((cache_ptr)->image_len > 0); \ - (cache_ptr)->images_loaded++; \ - (cache_ptr)->last_image_size = (cache_ptr)->image_len; \ -} while (0) -/* clang-format on */ +#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \ + do { \ + (cache_ptr)->images_created++; \ + } while (0) +#define 
H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \ + do { \ + /* make sure image len is still good */ \ + assert((cache_ptr)->image_len > 0); \ + (cache_ptr)->images_read++; \ + } while (0) +#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) \ + do { \ + /* make sure image len is still good */ \ + assert((cache_ptr)->image_len > 0); \ + (cache_ptr)->images_loaded++; \ + (cache_ptr)->last_image_size = (cache_ptr)->image_len; \ + } while (0) #else /* H5C_COLLECT_CACHE_STATS */ #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) #define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 6a636aee76c..b8d579a6df0 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -593,7 +593,7 @@ ***********************************************************************/ #define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3) -#define H5C__HASH_FCN(x) (int)((unsigned)((x)&H5C__HASH_MASK) >> 3) +#define H5C__HASH_FCN(x) (int)((unsigned)((x) & H5C__HASH_MASK) >> 3) #define H5C__POST_HT_SHIFT_TO_FRONT_SC_CMP(cache_ptr, entry_ptr, k) \ ((cache_ptr) == NULL || (cache_ptr)->index[k] != (entry_ptr) || (entry_ptr)->ht_prev != NULL) diff --git a/src/H5D.c b/src/H5D.c index a50a2aceda6..7673acc0711 100644 --- a/src/H5D.c +++ b/src/H5D.c @@ -135,7 +135,7 @@ H5D__create_api_common(hid_t loc_id, const char *name, hid_t type_id, hid_t spac HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, H5I_INVALID_HID, "unable to create dataset"); /* Get an ID for the dataset */ - if ((ret_value = H5VL_register(H5I_DATASET, dset, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_DATASET, dset, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register dataset"); done: @@ -222,7 +222,7 @@ H5Dcreate_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE11(__func__, "*s*sIui*siiiiii", app_file, app_func, app_line, loc_id, name, type_id, space_id, lcpl_id, dcpl_id, dapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -309,7 +309,7 @@ H5Dcreate_anon(hid_t loc_id, hid_t type_id, hid_t space_id, hid_t dcpl_id, hid_t HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, H5I_INVALID_HID, "unable to create dataset"); /* Get an ID for the dataset */ - if ((ret_value = H5VL_register(H5I_DATASET, dset, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_DATASET, dset, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register dataset"); done: @@ -360,7 +360,7 @@ H5D__open_api_common(hid_t loc_id, const char *name, hid_t dapl_id, void **token HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open dataset"); /* Register an atom for the dataset */ - if ((ret_value = H5VL_register(H5I_DATASET, dset, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_DATASET, dset, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, H5I_INVALID_HID, "can't register dataset ID"); done: @@ -433,7 +433,7 @@ H5Dopen_async(const char *app_file, const char *app_func, unsigned app_line, hid /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if 
(H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIui*sii", app_file, app_func, app_line, loc_id, name, dapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -489,11 +489,11 @@ H5Dclose(hid_t dset_id) herr_t H5Dclose_async(const char *app_file, const char *app_func, unsigned app_line, hid_t dset_id, hid_t es_id) { - void *token = NULL; /* Request token for async operation */ - void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ - H5VL_object_t *vol_obj = NULL; /* VOL object of dset_id */ - H5VL_t *connector = NULL; /* VOL connector */ - herr_t ret_value = SUCCEED; /* Return value */ + void *token = NULL; /* Request token for async operation */ + void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ + H5VL_object_t *vol_obj = NULL; /* VOL object of dset_id */ + H5VL_connector_t *connector = NULL; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -509,7 +509,7 @@ H5Dclose_async(const char *app_file, const char *app_func, unsigned app_line, hi if (H5ES_NONE != es_id) { /* Increase connector's refcount, so it doesn't get closed if closing * the dataset closes the file */ - connector = vol_obj->connector; + connector = H5VL_OBJ_CONNECTOR(vol_obj); H5VL_conn_inc_rc(connector); /* Point at token for operation to set up */ @@ -525,7 +525,7 @@ H5Dclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, dset_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -639,7 +639,7 @@ H5Dget_space_async(const char *app_file, const char *app_func, unsigned app_line /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, dset_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref(ret_value) < 0) @@ -929,11 +929,11 @@ H5D__read_api_common(size_t count, hid_t dset_id[], hid_t mem_type_id[], hid_t m H5VL_object_t *tmp_vol_obj = NULL; /* Object for loc_id */ H5VL_object_t **vol_obj_ptr = (_vol_obj_ptr ? 
_vol_obj_ptr : &tmp_vol_obj); /* Ptr to object ptr for loc_id */ - void *obj_local; /* Local buffer for obj */ - void **obj = &obj_local; /* Array of object pointers */ - H5VL_t *connector; /* VOL connector pointer */ - size_t i; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + void *obj_local; /* Local buffer for obj */ + void **obj = &obj_local; /* Array of object pointers */ + H5VL_connector_t *connector; /* VOL connector pointer */ + size_t i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -954,7 +954,7 @@ H5D__read_api_common(size_t count, hid_t dset_id[], hid_t mem_type_id[], hid_t m /* Allocate obj array if necessary */ if (count > 1) if (NULL == (obj = (void **)H5MM_malloc(count * sizeof(void *)))) - HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, FAIL, "can't allocate space for object array"); + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "can't allocate space for object array"); /* Get vol_obj_ptr (return just the first dataset to caller if requested) */ if (NULL == (*vol_obj_ptr = H5VL_vol_object_verify(dset_id[0], H5I_DATASET))) @@ -963,18 +963,22 @@ H5D__read_api_common(size_t count, hid_t dset_id[], hid_t mem_type_id[], hid_t m /* Save the connector of the first dataset. Unpack the connector and call * the "direct" read function here to avoid allocating an array of count * H5VL_object_ts. */ - connector = (*vol_obj_ptr)->connector; + connector = H5VL_OBJ_CONNECTOR(*vol_obj_ptr); /* Build obj array */ - obj[0] = (*vol_obj_ptr)->data; + obj[0] = H5VL_OBJ_DATA(*vol_obj_ptr); for (i = 1; i < count; i++) { + htri_t cls_cmp; + /* Get the object */ if (NULL == (tmp_vol_obj = H5VL_vol_object_verify(dset_id[i], H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id is not a dataset ID"); - obj[i] = tmp_vol_obj->data; + obj[i] = H5VL_OBJ_DATA(tmp_vol_obj); /* Make sure the class matches */ - if (tmp_vol_obj->connector->cls->value != connector->cls->value) + if ((cls_cmp = H5VL_conn_same_class(H5VL_OBJ_CONNECTOR(tmp_vol_obj), connector)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOMPARE, FAIL, "can't compare VOL connectors"); + if (!cls_cmp) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "datasets are accessed through different VOL connectors and can't be used in the " "same I/O call"); @@ -1079,7 +1083,7 @@ H5Dread_async(const char *app_file, const char *app_func, unsigned app_line, hid /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIuiiiii*xi", app_file, app_func, app_line, dset_id, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1152,7 +1156,7 @@ H5Dread_multi_async(const char *app_file, const char *app_func, unsigned app_lin /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE11(__func__, "*s*sIuz*i*i*i*ii**xi", app_file, app_func, app_line, count, dset_id, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1231,11 +1235,11 @@ H5D__write_api_common(size_t count, hid_t dset_id[], hid_t 
mem_type_id[], hid_t H5VL_object_t *tmp_vol_obj = NULL; /* Object for loc_id */ H5VL_object_t **vol_obj_ptr = (_vol_obj_ptr ? _vol_obj_ptr : &tmp_vol_obj); /* Ptr to object ptr for loc_id */ - void *obj_local; /* Local buffer for obj */ - void **obj = &obj_local; /* Array of object pointers */ - H5VL_t *connector; /* VOL connector pointer */ - size_t i; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + void *obj_local; /* Local buffer for obj */ + void **obj = &obj_local; /* Array of object pointers */ + H5VL_connector_t *connector; /* VOL connector pointer */ + size_t i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -1256,7 +1260,7 @@ H5D__write_api_common(size_t count, hid_t dset_id[], hid_t mem_type_id[], hid_t /* Allocate obj array if necessary */ if (count > 1) if (NULL == (obj = (void **)H5MM_malloc(count * sizeof(void *)))) - HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, FAIL, "can't allocate space for object array"); + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "can't allocate space for object array"); /* Get vol_obj_ptr (return just the first dataset to caller if requested) */ if (NULL == (*vol_obj_ptr = (H5VL_object_t *)H5I_object_verify(dset_id[0], H5I_DATASET))) @@ -1265,18 +1269,22 @@ H5D__write_api_common(size_t count, hid_t dset_id[], hid_t mem_type_id[], hid_t /* Save the connector of the first dataset. Unpack the connector and call * the "direct" write function here to avoid allocating an array of count * H5VL_object_ts. */ - connector = (*vol_obj_ptr)->connector; + connector = H5VL_OBJ_CONNECTOR(*vol_obj_ptr); /* Build obj array */ - obj[0] = (*vol_obj_ptr)->data; + obj[0] = H5VL_OBJ_DATA(*vol_obj_ptr); for (i = 1; i < count; i++) { + htri_t cls_cmp; + /* Get the object */ if (NULL == (tmp_vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id[i], H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id is not a dataset ID"); - obj[i] = tmp_vol_obj->data; + obj[i] = H5VL_OBJ_DATA(tmp_vol_obj); /* Make sure the class matches */ - if (tmp_vol_obj->connector->cls->value != connector->cls->value) + if ((cls_cmp = H5VL_conn_same_class(H5VL_OBJ_CONNECTOR(tmp_vol_obj), connector)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOMPARE, FAIL, "can't compare VOL connectors"); + if (!cls_cmp) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "datasets are accessed through different VOL connectors and can't be used in the " "same I/O call"); @@ -1383,7 +1391,7 @@ H5Dwrite_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIuiiiii*xi", app_file, app_func, app_line, dset_id, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1456,7 +1464,7 @@ H5Dwrite_multi_async(const char *app_file, const char *app_func, unsigned app_li /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE11(__func__, "*s*sIuz*i*i*i*ii**xi", app_file, app_func, app_line, count, dset_id, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, es_id)) < 0) /* clang-format on */ 
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -2012,7 +2020,7 @@ H5Dset_extent_async(const char *app_file, const char *app_func, unsigned app_lin /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE6(__func__, "*s*sIui*hi", app_file, app_func, app_line, dset_id, size, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "can't insert token into event set"); diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 6419a52700b..d232024a3f0 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -245,7 +245,7 @@ typedef struct H5D_chunk_coll_fill_info_t { haddr_t addr; /* File address of the chunk */ size_t chunk_size; /* Size of the chunk in the file */ bool unfiltered_partial_chunk; - } * chunk_info; + } *chunk_info; } H5D_chunk_coll_fill_info_t; #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c index 51d6e4323c0..a8f92fe5c55 100644 --- a/src/H5Ddeprec.c +++ b/src/H5Ddeprec.c @@ -137,7 +137,7 @@ H5Dcreate1(hid_t loc_id, const char *name, hid_t type_id, hid_t space_id, hid_t HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, H5I_INVALID_HID, "unable to create dataset"); /* Register the new dataset to get an ID for it */ - if ((ret_value = H5VL_register(H5I_DATASET, dset, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_DATASET, dset, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register dataset"); done: @@ -192,7 +192,7 @@ H5Dopen1(hid_t loc_id, const char *name) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open dataset"); /* Get an ID for the dataset */ - if ((ret_value = H5VL_register(H5I_DATASET, dset, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_DATASET, dset, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, H5I_INVALID_HID, "can't register dataset ID"); done: diff --git a/src/H5Defl.c b/src/H5Defl.c index 34d48e690b7..42678761a00 100644 --- a/src/H5Defl.c +++ b/src/H5Defl.c @@ -323,7 +323,7 @@ H5D__efl_read(const H5O_efl_t *efl, const H5D_t *dset, haddr_t addr, size_t size H5_CHECK_OVERFLOW(tempto_read, hsize_t, size_t); to_read = (size_t)tempto_read; #else /* NDEBUG */ - to_read = MIN((size_t)(efl->slot[u].size - skip), (hsize_t)size); + to_read = MIN((size_t)(efl->slot[u].size - skip), (hsize_t)size); #endif /* NDEBUG */ /* Inner loop - read to_read bytes from a single external file */ diff --git a/src/H5Dint.c b/src/H5Dint.c index 700d8306604..2b1d0c7e20b 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -2791,6 +2791,7 @@ H5D__vlen_get_buf_size_gen_cb(void H5_ATTR_UNUSED *elem, hid_t type_id, unsigned { H5D_vlen_bufsize_generic_t *vlen_bufsize = (H5D_vlen_bufsize_generic_t *)op_data; H5T_t *dt; /* Datatype for operation */ + void *vol_obj_data; /* VOL object's data pointer */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -2813,8 +2814,9 @@ H5D__vlen_get_buf_size_gen_cb(void H5_ATTR_UNUSED *elem, hid_t type_id, unsigned HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't select point"); /* Read in the point (with the custom VL memory allocator) */ - if (H5VL_dataset_read(1, &vlen_bufsize->dset_vol_obj->data, vlen_bufsize->dset_vol_obj->connector, - &type_id, &vlen_bufsize->mspace_id, &vlen_bufsize->fspace_id, vlen_bufsize->dxpl_id, + vol_obj_data 
= H5VL_OBJ_DATA(vlen_bufsize->dset_vol_obj); + if (H5VL_dataset_read(1, &vol_obj_data, H5VL_OBJ_CONNECTOR(vlen_bufsize->dset_vol_obj), &type_id, + &vlen_bufsize->mspace_id, &vlen_bufsize->fspace_id, vlen_bufsize->dxpl_id, &vlen_bufsize->common.fl_tbuf, H5_REQUEST_NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read point"); diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c index bd3765e2236..ae2b9aa4a74 100644 --- a/src/H5Dlayout.c +++ b/src/H5Dlayout.c @@ -49,6 +49,7 @@ const unsigned H5O_layout_ver_bounds[] = { H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V112 */ H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V114 */ H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V116 */ + H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V118 */ H5O_LAYOUT_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 8692acdeff8..fbe60f03735 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -332,8 +332,8 @@ H5_DLL hid_t H5Dcreate_async(const char *app_file, const char *app_func, unsigne const char *name, hid_t type_id, hid_t space_id, hid_t lcpl_id, hid_t dcpl_id, hid_t dapl_id, hid_t es_id); #else -H5_DLL hid_t H5Dcreate_async(hid_t loc_id, const char *name, hid_t type_id, hid_t space_id, hid_t lcpl_id, - hid_t dcpl_id, hid_t dapl_id, hid_t es_id); +H5_DLL hid_t H5Dcreate_async(hid_t loc_id, const char *name, hid_t type_id, hid_t space_id, hid_t lcpl_id, + hid_t dcpl_id, hid_t dapl_id, hid_t es_id); #endif /** @@ -414,7 +414,7 @@ H5_DLL hid_t H5Dopen2(hid_t loc_id, const char *name, hid_t dapl_id); H5_DLL hid_t H5Dopen_async(const char *app_file, const char *app_func, unsigned app_line, hid_t loc_id, const char *name, hid_t dapl_id, hid_t es_id); #else -H5_DLL hid_t H5Dopen_async(hid_t loc_id, const char *name, hid_t dapl_id, hid_t es_id); +H5_DLL hid_t H5Dopen_async(hid_t loc_id, const char *name, hid_t dapl_id, hid_t es_id); #endif /** @@ -454,7 +454,7 @@ H5_DLL hid_t H5Dget_space(hid_t dset_id); H5_DLL hid_t H5Dget_space_async(const char *app_file, const char *app_func, unsigned app_line, hid_t dset_id, hid_t es_id); #else -H5_DLL hid_t H5Dget_space_async(hid_t dset_id, hid_t es_id); +H5_DLL hid_t H5Dget_space_async(hid_t dset_id, hid_t es_id); #endif /** diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c index ac31231b1e8..d10f4af8520 100644 --- a/src/H5Dvirtual.c +++ b/src/H5Dvirtual.c @@ -3124,7 +3124,7 @@ H5D__virtual_refresh_source_dset(H5D_t **dset) HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "can't unregister source dataset ID"); if (NULL == (*dset = (H5D_t *)H5VL_object_unwrap(vol_obj))) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve library object from VOL object"); - vol_obj->data = NULL; + H5VL_OBJ_DATA_RESET(vol_obj); done: if (vol_obj && H5VL_free_object(vol_obj) < 0) diff --git a/src/H5E.c b/src/H5E.c index f1b865e7c17..a037ffa595e 100644 --- a/src/H5E.c +++ b/src/H5E.c @@ -611,7 +611,7 @@ H5Eclear2(hid_t err_stack) } /* end else */ /* Clear the error stack */ - if (H5E__clear_stack(estack) < 0) + if (H5E__destroy_stack(estack) < 0) HGOTO_ERROR(H5E_ERROR, H5E_CANTSET, FAIL, "can't clear error stack"); done: diff --git a/src/H5ES.c b/src/H5ES.c index a354db3016d..7b830b5f740 100644 --- a/src/H5ES.c +++ b/src/H5ES.c @@ -111,9 +111,9 @@ H5EScreate(void) herr_t H5ESinsert_request(hid_t es_id, hid_t connector_id, void *request) { - H5ES_t *es; /* Event set */ - H5VL_t *connector = NULL; /* VOL connector */ - herr_t ret_value = SUCCEED; /* Return value */ + H5ES_t *es; /* Event set */ + H5VL_connector_t *connector = NULL; /* VOL connector */ + herr_t ret_value = SUCCEED; 
/* Return value */ FUNC_ENTER_API(FAIL) @@ -122,22 +122,14 @@ H5ESinsert_request(hid_t es_id, hid_t connector_id, void *request) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid event set identifier"); if (NULL == request) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL request pointer"); - - /* Create new VOL connector object, using the connector ID */ - if (NULL == (connector = H5VL_new_connector(connector_id))) - HGOTO_ERROR(H5E_EVENTSET, H5E_CANTCREATE, FAIL, "can't create VOL connector object"); + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Insert request into event set */ if (H5ES__insert_request(es, connector, request) < 0) HGOTO_ERROR(H5E_EVENTSET, H5E_CANTINSERT, FAIL, "can't insert request into event set"); done: - /* Clean up on error */ - if (ret_value < 0) - /* Release newly created connector */ - if (connector && H5VL_conn_dec_rc(connector) < 0) - HDONE_ERROR(H5E_EVENTSET, H5E_CANTDEC, FAIL, "unable to decrement ref count on VOL connector"); - FUNC_LEAVE_API(ret_value) } /* end H5ESinsert_request() */ diff --git a/src/H5ESevent.c b/src/H5ESevent.c index 3286424ff13..d925fb407d8 100644 --- a/src/H5ESevent.c +++ b/src/H5ESevent.c @@ -79,7 +79,7 @@ H5FL_DEFINE_STATIC(H5ES_event_t); *------------------------------------------------------------------------- */ H5ES_event_t * -H5ES__event_new(H5VL_t *connector, void *token) +H5ES__event_new(H5VL_connector_t *connector, void *token) { H5ES_event_t *ev = NULL; /* New event */ H5VL_object_t *request = NULL; /* Async request token VOL object */ diff --git a/src/H5ESint.c b/src/H5ESint.c index dab683913d6..eef68d579c2 100644 --- a/src/H5ESint.c +++ b/src/H5ESint.c @@ -39,6 +39,7 @@ #include "H5Iprivate.h" /* IDs */ #include "H5MMprivate.h" /* Memory management */ #include "H5RSprivate.h" /* Reference-counted strings */ +#include "H5VLprivate.h" /* Virtual Object Layer */ /****************/ /* Local Macros */ @@ -88,7 +89,7 @@ typedef struct H5ES_gei_ctx_t { /********************/ static herr_t H5ES__close(H5ES_t *es); static herr_t H5ES__close_cb(void *es, void **request_token); -static herr_t H5ES__insert(H5ES_t *es, H5VL_t *connector, void *request_token, const char *app_file, +static herr_t H5ES__insert(H5ES_t *es, H5VL_connector_t *connector, void *request_token, const char *app_file, const char *app_func, unsigned app_line, const char *caller, const char *api_args); static int H5ES__get_requests_cb(H5ES_event_t *ev, void *_ctx); static herr_t H5ES__handle_fail(H5ES_t *es, H5ES_event_t *ev); @@ -240,8 +241,8 @@ H5ES__create(void) *------------------------------------------------------------------------- */ static herr_t -H5ES__insert(H5ES_t *es, H5VL_t *connector, void *request_token, const char *app_file, const char *app_func, - unsigned app_line, const char *caller, const char *api_args) +H5ES__insert(H5ES_t *es, H5VL_connector_t *connector, void *request_token, const char *app_file, + const char *app_func, unsigned app_line, const char *caller, const char *api_args) { H5ES_event_t *ev = NULL; /* Event for request */ bool ev_inserted = false; /* Flag to indicate that event is in active list */ @@ -313,7 +314,8 @@ H5ES__insert(H5ES_t *es, H5VL_t *connector, void *request_token, const char *app *------------------------------------------------------------------------- */ herr_t -H5ES_insert(hid_t es_id, H5VL_t *connector, void *token, const char *caller, const char *caller_args, ...) 
+H5ES_insert(hid_t es_id, H5VL_connector_t *connector, void *token, const char *caller, + const char *caller_args, ...) { H5ES_t *es = NULL; /* Event set for the operation */ const char *app_file; /* Application source file name */ @@ -389,7 +391,7 @@ H5ES_insert(hid_t es_id, H5VL_t *connector, void *token, const char *caller, con *------------------------------------------------------------------------- */ herr_t -H5ES__insert_request(H5ES_t *es, H5VL_t *connector, void *token) +H5ES__insert_request(H5ES_t *es, H5VL_connector_t *connector, void *token) { herr_t ret_value = SUCCEED; /* Return value */ @@ -424,7 +426,7 @@ H5ES__get_requests_cb(H5ES_event_t *ev, void *_ctx) H5ES_get_requests_ctx_t *ctx = (H5ES_get_requests_ctx_t *)_ctx; /* Callback context */ int ret_value = H5_ITER_CONT; /* Return value */ - FUNC_ENTER_PACKAGE_NOERR + FUNC_ENTER_PACKAGE /* Sanity check */ assert(ev); @@ -433,16 +435,18 @@ H5ES__get_requests_cb(H5ES_event_t *ev, void *_ctx) /* Get the connector ID for the event */ if (ctx->connector_ids) - ctx->connector_ids[ctx->i] = ev->request->connector->id; + if ((ctx->connector_ids[ctx->i] = H5VL_conn_register(H5VL_OBJ_CONNECTOR(ev->request))) < 0) + HGOTO_ERROR(H5E_EVENTSET, H5E_CANTREGISTER, H5_ITER_ERROR, "unable to register VOL connector ID"); /* Get the request for the event */ if (ctx->requests) - ctx->requests[ctx->i] = ev->request->data; + ctx->requests[ctx->i] = H5VL_OBJ_DATA(ev->request); /* Check if we've run out of room in the arrays */ if (++ctx->i == ctx->array_len) ret_value = H5_ITER_STOP; +done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5ES__get_requests_cb() */ diff --git a/src/H5ESpkg.h b/src/H5ESpkg.h index 1da58a68679..853f39fefad 100644 --- a/src/H5ESpkg.h +++ b/src/H5ESpkg.h @@ -76,7 +76,7 @@ typedef int (*H5ES_list_iter_func_t)(H5ES_event_t *ev, void *ctx); /* Package Private Prototypes */ /******************************/ H5_DLL H5ES_t *H5ES__create(void) H5_ATTR_MALLOC; -H5_DLL herr_t H5ES__insert_request(H5ES_t *es, H5VL_t *connector, void *token); +H5_DLL herr_t H5ES__insert_request(H5ES_t *es, H5VL_connector_t *connector, void *token); H5_DLL herr_t H5ES__wait(H5ES_t *es, uint64_t timeout, size_t *num_in_progress, bool *op_failed); H5_DLL herr_t H5ES__get_requests(H5ES_t *es, H5_iter_order_t order, hid_t *connector_ids, void **requests, size_t array_len); @@ -92,7 +92,7 @@ H5_DLL int H5ES__list_iterate(H5ES_event_list_t *el, H5_iter_order_t order, H H5_DLL void H5ES__list_remove(H5ES_event_list_t *el, const H5ES_event_t *ev); /* Event operations */ -H5_DLL H5ES_event_t *H5ES__event_new(H5VL_t *connector, void *token); +H5_DLL H5ES_event_t *H5ES__event_new(H5VL_connector_t *connector, void *token); H5_DLL herr_t H5ES__event_free(H5ES_event_t *ev); H5_DLL herr_t H5ES__event_completed(H5ES_event_t *ev, H5ES_event_list_t *el); diff --git a/src/H5ESprivate.h b/src/H5ESprivate.h index 52392fbfd44..2de833de459 100644 --- a/src/H5ESprivate.h +++ b/src/H5ESprivate.h @@ -47,8 +47,8 @@ typedef struct H5ES_t H5ES_t; /***************************************/ /* Library-private Function Prototypes */ /***************************************/ -herr_t H5ES_insert(hid_t es_id, H5VL_t *connector, void *token, const char *caller, const char *caller_args, - ...); +herr_t H5ES_insert(hid_t es_id, H5VL_connector_t *connector, void *token, const char *caller, + const char *caller_args, ...); H5_DLL herr_t H5ES_init(void); #endif /* H5ESprivate_H */ diff --git a/src/H5Eint.c b/src/H5Eint.c index d01668e731d..333bc202c0b 100644 --- a/src/H5Eint.c +++ b/src/H5Eint.c @@ 
-646,7 +646,7 @@ H5E__get_current_stack(void) estack_copy->auto_data = current_stack->auto_data; /* Empty current error stack */ - H5E__clear_stack(current_stack); + H5E__destroy_stack(current_stack); /* Set the return value */ ret_value = estack_copy; @@ -685,7 +685,7 @@ H5E__set_current_stack(H5E_stack_t *estack) HGOTO_ERROR(H5E_ERROR, H5E_CANTGET, FAIL, "can't get current error stack"); /* Empty current error stack */ - H5E__clear_stack(current_stack); + H5E__destroy_stack(current_stack); /* Copy new stack to current error stack */ current_stack->nused = estack->nused; @@ -715,7 +715,7 @@ H5E__close_stack(H5E_stack_t *estack, void H5_ATTR_UNUSED **request) assert(estack); /* Release the stack's error information */ - H5E__clear_stack(estack); + H5E__destroy_stack(estack); /* Free the stack structure */ estack = H5FL_FREE(H5E_stack_t, estack); @@ -1673,6 +1673,15 @@ H5E__clear_entries(H5E_stack_t *estack, size_t nentries) * * Purpose: Clear the default error stack * + * Note: This routine should _not_ be used inside general library + * code in general. It creates complex locking issues for + * threadsafe code. Generally, using a 'try' parameter or + * an 'exists' parameter should be used if an operation is + * being used to probe for information. Remember: failing + * to locate a record is not an error for a data structure, + * although it could be an error for the user of the data + * structure. + * * Return: SUCCEED/FAIL * *------------------------------------------------------------------------- @@ -1699,16 +1708,20 @@ H5E_clear_stack(void) } /* end H5E_clear_stack() */ /*------------------------------------------------------------------------- - * Function: H5E__clear_stack + * Function: H5E__destroy_stack + * + * Purpose: Clear all internal state within an error stack, as a precursor to freeing it. * - * Purpose: Clear the specified error stack + * At present, this is nearly identical to H5E_clear_stack(), + * but if additional resources are added to the error stack in the future, + * they will only be released by this routine and not by H5E_clear_stack(). 
* * Return: SUCCEED/FAIL * *------------------------------------------------------------------------- */ herr_t -H5E__clear_stack(H5E_stack_t *estack) +H5E__destroy_stack(H5E_stack_t *estack) { herr_t ret_value = SUCCEED; /* Return value */ @@ -1726,7 +1739,7 @@ H5E__clear_stack(H5E_stack_t *estack) done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5E__clear_stack() */ +} /* end H5E__destroy_stack() */ /*------------------------------------------------------------------------- * Function: H5E__pop diff --git a/src/H5Epkg.h b/src/H5Epkg.h index b758cbe4ec9..2eda2b24e2d 100644 --- a/src/H5Epkg.h +++ b/src/H5Epkg.h @@ -153,6 +153,6 @@ H5_DLL herr_t H5E__get_auto(const H5E_stack_t *estack, H5E_auto_op_t *op, H5_DLL herr_t H5E__set_auto(H5E_stack_t *estack, const H5E_auto_op_t *op, void *client_data); H5_DLL herr_t H5E__pop(H5E_stack_t *err_stack, size_t count); H5_DLL herr_t H5E__append_stack(H5E_stack_t *dst_estack, const H5E_stack_t *src_stack); -H5_DLL herr_t H5E__clear_stack(H5E_stack_t *estack); +H5_DLL herr_t H5E__destroy_stack(H5E_stack_t *estack); #endif /* H5Epkg_H */ diff --git a/src/H5F.c b/src/H5F.c index 5dd7bda3903..b35ed2ef7c3 100644 --- a/src/H5F.c +++ b/src/H5F.c @@ -88,12 +88,6 @@ static herr_t H5F__flush_api_common(hid_t object_id, H5F_scope_t scope, void **t /* Local Variables */ /*******************/ -/* Declare a free list to manage the H5VL_t struct */ -H5FL_EXTERN(H5VL_t); - -/* Declare a free list to manage the H5VL_object_t struct */ -H5FL_EXTERN(H5VL_object_t); - /*------------------------------------------------------------------------- * Function: H5Fget_create_plist * @@ -603,12 +597,12 @@ H5F__create_api_common(const char *filename, unsigned flags, hid_t fcpl_id, hid_ flags |= H5F_ACC_RDWR | H5F_ACC_CREAT; /* Create a new file or truncate an existing file through the VOL */ - if (NULL == (new_file = H5VL_file_create(&connector_prop, filename, flags, fcpl_id, fapl_id, + if (NULL == (new_file = H5VL_file_create(connector_prop.connector, filename, flags, fcpl_id, fapl_id, H5P_DATASET_XFER_DEFAULT, token_ptr))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, H5I_INVALID_HID, "unable to create file"); /* Get an ID for the file */ - if ((ret_value = H5VL_register_using_vol_id(H5I_FILE, new_file, connector_prop.connector_id, true)) < 0) + if ((ret_value = H5VL_register(H5I_FILE, new_file, connector_prop.connector, true)) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register file handle"); done: @@ -701,7 +695,7 @@ H5Fcreate_async(const char *app_file, const char *app_func, unsigned app_line, c /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE8(__func__, "*s*sIu*sIuiii", app_file, app_func, app_line, filename, flags, fcpl_id, fapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref(ret_value) < 0) @@ -720,7 +714,7 @@ H5Fcreate_async(const char *app_file, const char *app_func, unsigned app_line, c /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE8(__func__, "*s*sIu*sIuiii", app_file, app_func, app_line, filename, flags, fcpl_id, fapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_FILE, H5E_CANTINSERT, H5I_INVALID_HID, "can't insert token into event set"); @@ -783,12 +777,12 @@ 
H5F__open_api_common(const char *filename, unsigned flags, hid_t fapl_id, void * HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set VOL connector info in API context"); /* Open the file through the VOL layer */ - if (NULL == (new_file = H5VL_file_open(&connector_prop, filename, flags, fapl_id, + if (NULL == (new_file = H5VL_file_open(connector_prop.connector, filename, flags, fapl_id, H5P_DATASET_XFER_DEFAULT, token_ptr))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, H5I_INVALID_HID, "unable to open file"); /* Get an ID for the file */ - if ((ret_value = H5VL_register_using_vol_id(H5I_FILE, new_file, connector_prop.connector_id, true)) < 0) + if ((ret_value = H5VL_register(H5I_FILE, new_file, connector_prop.connector, true)) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register file handle"); done: @@ -876,7 +870,7 @@ H5Fopen_async(const char *app_file, const char *app_func, unsigned app_line, con /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIu*sIuii", app_file, app_func, app_line, filename, flags, fapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref(ret_value) < 0) @@ -895,7 +889,7 @@ H5Fopen_async(const char *app_file, const char *app_func, unsigned app_line, con /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIu*sIuii", app_file, app_func, app_line, filename, flags, fapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_FILE, H5E_CANTINSERT, H5I_INVALID_HID, "can't insert token into event set"); @@ -1006,7 +1000,7 @@ H5Fflush_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE6(__func__, "*s*sIuiFsi", app_file, app_func, app_line, object_id, scope, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_FILE, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1062,11 +1056,11 @@ H5Fclose(hid_t file_id) herr_t H5Fclose_async(const char *app_file, const char *app_func, unsigned app_line, hid_t file_id, hid_t es_id) { - H5VL_object_t *vol_obj = NULL; /* Object for loc_id */ - H5VL_t *connector = NULL; /* VOL connector */ - void *token = NULL; /* Request token for async operation */ - void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_object_t *vol_obj = NULL; /* Object for loc_id */ + H5VL_connector_t *connector = NULL; /* VOL connector */ + void *token = NULL; /* Request token for async operation */ + void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -1082,7 +1076,7 @@ H5Fclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* Increase connector's refcount, so it doesn't get closed if closing * this file ID closes the file */ - connector = vol_obj->connector; + connector = H5VL_OBJ_CONNECTOR(vol_obj); H5VL_conn_inc_rc(connector); /* Point at token for operation to set up */ 
@@ -1098,7 +1092,7 @@ H5Fclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, file_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_FILE, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1193,8 +1187,8 @@ H5Fmount(hid_t loc_id, const char *name, hid_t child_id, hid_t plist_id) H5VL_group_specific_args_t vol_cb_args; /* Arguments to VOL callback */ void *grp = NULL; /* Root group opened */ H5I_type_t loc_type; /* ID type of location */ - int same_connector = 0; /* Whether parent and child files use the same connector */ - herr_t ret_value = SUCCEED; /* Return value */ + htri_t same_connector; /* Whether parent and child files use the same connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -1239,7 +1233,7 @@ H5Fmount(hid_t loc_id, const char *name, hid_t child_id, hid_t plist_id) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open group"); /* Create a VOL object for the root group */ - if (NULL == (loc_vol_obj = H5VL_create_object(grp, vol_obj->connector))) + if (NULL == (loc_vol_obj = H5VL_create_object(grp, H5VL_OBJ_CONNECTOR(vol_obj)))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "can't create VOL object for root group"); } /* end if */ else { @@ -1253,10 +1247,10 @@ H5Fmount(hid_t loc_id, const char *name, hid_t child_id, hid_t plist_id) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "could not get child object"); /* Check if both objects are associated with the same VOL connector */ - if (H5VL_cmp_connector_cls(&same_connector, loc_vol_obj->connector->cls, child_vol_obj->connector->cls) < - 0) + if ((same_connector = + H5VL_conn_same_class(H5VL_OBJ_CONNECTOR(loc_vol_obj), H5VL_OBJ_CONNECTOR(child_vol_obj))) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); - if (same_connector) + if (!same_connector) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "can't mount file onto object from different VOL connector"); @@ -1264,7 +1258,7 @@ H5Fmount(hid_t loc_id, const char *name, hid_t child_id, hid_t plist_id) vol_cb_args.op_type = H5VL_GROUP_MOUNT; vol_cb_args.args.mount.name = name; vol_cb_args.args.mount.child_file = - child_vol_obj->data; /* Don't unwrap fully, so each connector can see its object */ + H5VL_OBJ_DATA(child_vol_obj); /* Don't unwrap fully, so each connector can see its object */ vol_cb_args.args.mount.fmpl_id = plist_id; /* Perform the mount operation */ @@ -1349,7 +1343,7 @@ H5Funmount(hid_t loc_id, const char *name) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open group"); /* Create a VOL object for the root group */ - if (NULL == (loc_vol_obj = H5VL_create_object(grp, vol_obj->connector))) + if (NULL == (loc_vol_obj = H5VL_create_object(grp, H5VL_OBJ_CONNECTOR(vol_obj)))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "can't create VOL object for root group"); } /* end if */ else { @@ -1420,7 +1414,7 @@ H5F__reopen_api_common(hid_t file_id, void **token_ptr) HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, H5I_INVALID_HID, "unable to reopen file"); /* Get an ID for the file */ - if ((ret_value = H5VL_register(H5I_FILE, reopen_file, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_FILE, reopen_file, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_FILE, 
H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register file handle"); done: @@ -1504,7 +1498,7 @@ H5Freopen_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, file_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref(ret_value) < 0) @@ -1523,7 +1517,7 @@ H5Freopen_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, file_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_FILE, H5E_CANTINSERT, H5I_INVALID_HID, "can't insert token into event set"); diff --git a/src/H5FDhdfs.c b/src/H5FDhdfs.c index 7f7a2950ae3..25fe7cba95a 100644 --- a/src/H5FDhdfs.c +++ b/src/H5FDhdfs.c @@ -73,7 +73,7 @@ static hid_t H5FD_HDFS_g = 0; unsigned long long donotshadowresult = 1; \ unsigned donotshadowindex = 0; \ for (donotshadowindex = 0; \ - donotshadowindex < (((bin_i)*HDFS_STATS_INTERVAL) + HDFS_STATS_START_POWER); \ + donotshadowindex < (((bin_i) * HDFS_STATS_INTERVAL) + HDFS_STATS_START_POWER); \ donotshadowindex++) { \ donotshadowresult *= HDFS_STATS_BASE; \ } \ diff --git a/src/H5FDlog.c b/src/H5FDlog.c index e8131726fef..8c02415ce08 100644 --- a/src/H5FDlog.c +++ b/src/H5FDlog.c @@ -1225,7 +1225,7 @@ H5FD__log_read(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, had if (bytes_read > 0) offset += bytes_read; #else - bytes_read = HDread(file->fd, buf, bytes_in); + bytes_read = HDread(file->fd, buf, bytes_in); #endif /* H5_HAVE_PREADWRITE */ } while (-1 == bytes_read && EINTR == errno); diff --git a/src/H5FDmirror.c b/src/H5FDmirror.c index d5b9c08186d..bde3647f652 100644 --- a/src/H5FDmirror.c +++ b/src/H5FDmirror.c @@ -65,10 +65,10 @@ typedef struct H5FD_mirror_t { #ifndef BSWAP_64 #define BSWAP_64(X) \ - (uint64_t)((((X)&0x00000000000000FF) << 56) | (((X)&0x000000000000FF00) << 40) | \ - (((X)&0x0000000000FF0000) << 24) | (((X)&0x00000000FF000000) << 8) | \ - (((X)&0x000000FF00000000) >> 8) | (((X)&0x0000FF0000000000) >> 24) | \ - (((X)&0x00FF000000000000) >> 40) | (((X)&0xFF00000000000000) >> 56)) + (uint64_t)((((X) & 0x00000000000000FF) << 56) | (((X) & 0x000000000000FF00) << 40) | \ + (((X) & 0x0000000000FF0000) << 24) | (((X) & 0x00000000FF000000) << 8) | \ + (((X) & 0x000000FF00000000) >> 8) | (((X) & 0x0000FF0000000000) >> 24) | \ + (((X) & 0x00FF000000000000) >> 40) | (((X) & 0xFF00000000000000) >> 56)) #endif /* BSWAP_64 */ /* Debugging flabs for verbose tracing -- nonzero to enable */ diff --git a/src/H5FDs3comms.c b/src/H5FDs3comms.c index 0d1cd0f868a..78ed5273f52 100644 --- a/src/H5FDs3comms.c +++ b/src/H5FDs3comms.c @@ -1761,7 +1761,6 @@ H5FD__s3comms_load_aws_creds_from_file(FILE *file, const char *profile_name, cha }; unsigned setting_count = 3; herr_t ret_value = SUCCEED; - unsigned buffer_i = 0; unsigned setting_i = 0; int found_setting = 0; char *line_buffer = &(buffer[0]); diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c index 99ff8df6cfe..ed4093adf74 100644 --- a/src/H5FDsec2.c +++ b/src/H5FDsec2.c @@ -692,7 +692,7 @@ H5FD__sec2_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED 
type, hid_t H5_ATTR_UNU if (bytes_read > 0) offset += bytes_read; #else - bytes_read = HDread(file->fd, buf, bytes_in); + bytes_read = HDread(file->fd, buf, bytes_in); #endif /* H5_HAVE_PREADWRITE */ } while (-1 == bytes_read && EINTR == errno); diff --git a/src/H5FDstdio.c b/src/H5FDstdio.c index 00971e44bfb..b53dd3dc1ad 100644 --- a/src/H5FDstdio.c +++ b/src/H5FDstdio.c @@ -113,7 +113,7 @@ typedef struct H5FD_stdio_t { DWORD nFileIndexHigh; DWORD dwVolumeSerialNumber; - HANDLE hFile; /* Native windows file handle */ + HANDLE hFile; /* Native windows file handle */ #endif /* H5_HAVE_WIN32_API */ } H5FD_stdio_t; diff --git a/src/H5FDsubfiling/H5FDioc_priv.h b/src/H5FDsubfiling/H5FDioc_priv.h index 2f17dbe221d..c1b47661de5 100644 --- a/src/H5FDsubfiling/H5FDioc_priv.h +++ b/src/H5FDsubfiling/H5FDioc_priv.h @@ -65,74 +65,65 @@ #define H5FD_IOC__IO_Q_ENTRY_MAGIC 0x1357 -/* clang-format off */ - -#define H5FD_IOC__Q_APPEND(q_ptr, entry_ptr) \ -do { \ - assert(q_ptr); \ - assert((q_ptr)->magic == H5FD_IOC__IO_Q_MAGIC); \ - assert((((q_ptr)->q_len == 0) && ((q_ptr)->q_head == NULL) && ((q_ptr)->q_tail == NULL)) || \ - (((q_ptr)->q_len > 0) && ((q_ptr)->q_head != NULL) && ((q_ptr)->q_tail != NULL))); \ - assert(entry_ptr); \ - assert((entry_ptr)->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC); \ - assert((entry_ptr)->next == NULL); \ - assert((entry_ptr)->prev == NULL); \ - assert((entry_ptr)->in_progress == false); \ - \ - if ( ((q_ptr)->q_head) == NULL ) \ - { \ - ((q_ptr)->q_head) = (entry_ptr); \ - ((q_ptr)->q_tail) = (entry_ptr); \ - } \ - else \ - { \ - ((q_ptr)->q_tail)->next = (entry_ptr); \ - (entry_ptr)->prev = ((q_ptr)->q_tail); \ - ((q_ptr)->q_tail) = (entry_ptr); \ - } \ - ((q_ptr)->q_len)++; \ -} while ( false ) /* H5FD_IOC__Q_APPEND() */ - -#define H5FD_IOC__Q_REMOVE(q_ptr, entry_ptr) \ -do { \ - assert(q_ptr); \ - assert((q_ptr)->magic == H5FD_IOC__IO_Q_MAGIC); \ - assert((((q_ptr)->q_len == 1) && ((q_ptr)->q_head ==((q_ptr)->q_tail)) && ((q_ptr)->q_head == (entry_ptr))) || \ - (((q_ptr)->q_len > 0) && ((q_ptr)->q_head != NULL) && ((q_ptr)->q_tail != NULL))); \ - assert(entry_ptr); \ - assert((entry_ptr)->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC); \ - assert((((q_ptr)->q_len == 1) && ((entry_ptr)->next == NULL) && ((entry_ptr)->prev == NULL)) || \ - (((q_ptr)->q_len > 1) && (((entry_ptr)->next != NULL) || ((entry_ptr)->prev != NULL)))); \ - assert((entry_ptr)->in_progress == true); \ - \ - { \ - if ( (((q_ptr)->q_head)) == (entry_ptr) ) \ - { \ - (((q_ptr)->q_head)) = (entry_ptr)->next; \ - if ( (((q_ptr)->q_head)) != NULL ) \ - (((q_ptr)->q_head))->prev = NULL; \ - } \ - else \ - { \ - (entry_ptr)->prev->next = (entry_ptr)->next; \ - } \ - if (((q_ptr)->q_tail) == (entry_ptr) ) \ - { \ - ((q_ptr)->q_tail) = (entry_ptr)->prev; \ - if ( ((q_ptr)->q_tail) != NULL ) \ - ((q_ptr)->q_tail)->next = NULL; \ - } \ - else \ - { \ - (entry_ptr)->next->prev = (entry_ptr)->prev; \ - } \ - (entry_ptr)->next = NULL; \ - (entry_ptr)->prev = NULL; \ - ((q_ptr)->q_len)--; \ - } \ -} while ( false ) /* H5FD_IOC__Q_REMOVE() */ - -/* clang-format on */ +#define H5FD_IOC__Q_APPEND(q_ptr, entry_ptr) \ + do { \ + assert(q_ptr); \ + assert((q_ptr)->magic == H5FD_IOC__IO_Q_MAGIC); \ + assert((((q_ptr)->q_len == 0) && ((q_ptr)->q_head == NULL) && ((q_ptr)->q_tail == NULL)) || \ + (((q_ptr)->q_len > 0) && ((q_ptr)->q_head != NULL) && ((q_ptr)->q_tail != NULL))); \ + assert(entry_ptr); \ + assert((entry_ptr)->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC); \ + assert((entry_ptr)->next == NULL); \ + assert((entry_ptr)->prev == NULL); 
\ + assert((entry_ptr)->in_progress == false); \ + \ + if (((q_ptr)->q_head) == NULL) { \ + ((q_ptr)->q_head) = (entry_ptr); \ + ((q_ptr)->q_tail) = (entry_ptr); \ + } \ + else { \ + ((q_ptr)->q_tail)->next = (entry_ptr); \ + (entry_ptr)->prev = ((q_ptr)->q_tail); \ + ((q_ptr)->q_tail) = (entry_ptr); \ + } \ + ((q_ptr)->q_len)++; \ + } while (false) /* H5FD_IOC__Q_APPEND() */ + +#define H5FD_IOC__Q_REMOVE(q_ptr, entry_ptr) \ + do { \ + assert(q_ptr); \ + assert((q_ptr)->magic == H5FD_IOC__IO_Q_MAGIC); \ + assert((((q_ptr)->q_len == 1) && ((q_ptr)->q_head == ((q_ptr)->q_tail)) && \ + ((q_ptr)->q_head == (entry_ptr))) || \ + (((q_ptr)->q_len > 0) && ((q_ptr)->q_head != NULL) && ((q_ptr)->q_tail != NULL))); \ + assert(entry_ptr); \ + assert((entry_ptr)->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC); \ + assert((((q_ptr)->q_len == 1) && ((entry_ptr)->next == NULL) && ((entry_ptr)->prev == NULL)) || \ + (((q_ptr)->q_len > 1) && (((entry_ptr)->next != NULL) || ((entry_ptr)->prev != NULL)))); \ + assert((entry_ptr)->in_progress == true); \ + \ + { \ + if ((((q_ptr)->q_head)) == (entry_ptr)) { \ + (((q_ptr)->q_head)) = (entry_ptr)->next; \ + if ((((q_ptr)->q_head)) != NULL) \ + (((q_ptr)->q_head))->prev = NULL; \ + } \ + else { \ + (entry_ptr)->prev->next = (entry_ptr)->next; \ + } \ + if (((q_ptr)->q_tail) == (entry_ptr)) { \ + ((q_ptr)->q_tail) = (entry_ptr)->prev; \ + if (((q_ptr)->q_tail) != NULL) \ + ((q_ptr)->q_tail)->next = NULL; \ + } \ + else { \ + (entry_ptr)->next->prev = (entry_ptr)->prev; \ + } \ + (entry_ptr)->next = NULL; \ + (entry_ptr)->prev = NULL; \ + ((q_ptr)->q_len)--; \ + } \ + } while (false) /* H5FD_IOC__Q_REMOVE() */ /**************************************************************************** * diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c index 796654254ad..159b7175589 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.c +++ b/src/H5FDsubfiling/H5FDsubfiling.c @@ -2050,8 +2050,6 @@ H5FD__subfiling_io_helper(H5FD_subfiling_t *file, size_t io_count, H5FD_mem_t ty H5_subfiling_dump_iovecs(sf_context, ioreq_count, iovec_len, io_type, io_types, io_addrs, io_sizes, io_bufs); #endif - - /* clang-format off */ /* * Having now populated the I/O vectors for this I/O request and * having determined how many I/O calls need to be made to satisfy @@ -2080,7 +2078,6 @@ H5FD__subfiling_io_helper(H5FD_subfiling_t *file, size_t io_count, H5FD_mem_t ty * ultimately responsible for mapping each I/O vector to its corresponding * subfile (here, pointed to by '->' to the right of each I/O vector). 
*/ - /* clang-format on */ for (size_t ioreq_idx = 0; ioreq_idx < ioreq_count; ioreq_idx++) { H5_flexible_const_ptr_t *io_bufs_ptr = NULL; H5FD_mem_t *io_types_ptr = NULL; diff --git a/src/H5Faccum.c b/src/H5Faccum.c index 9c4c8cdbbda..5fabf5266a0 100644 --- a/src/H5Faccum.c +++ b/src/H5Faccum.c @@ -725,7 +725,7 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s /* Make certain that data in accumulator is visible before new write */ if ((H5F_SHARED_INTENT(f_sh) & H5F_ACC_SWMR_WRITE) > 0) /* Flush if dirty and reset accumulator */ - if (H5F__accum_reset(f_sh, true) < 0) + if (H5F__accum_reset(f_sh, true, false) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator"); /* Write the data */ @@ -776,7 +776,7 @@ H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t map_type, haddr_t addr, size_t s } /* end if */ else { /* Access covers whole accumulator */ /* Reset accumulator, but don't flush */ - if (H5F__accum_reset(f_sh, false) < 0) + if (H5F__accum_reset(f_sh, false, false) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator"); } /* end else */ } /* end if */ @@ -1039,7 +1039,7 @@ H5F__accum_flush(H5F_shared_t *f_sh) *------------------------------------------------------------------------- */ herr_t -H5F__accum_reset(H5F_shared_t *f_sh, bool flush) +H5F__accum_reset(H5F_shared_t *f_sh, bool flush, bool force) { herr_t ret_value = SUCCEED; /* Return value */ @@ -1050,8 +1050,11 @@ H5F__accum_reset(H5F_shared_t *f_sh, bool flush) /* Flush any dirty data in accumulator, if requested */ if (flush) - if (H5F__accum_flush(f_sh) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "can't flush metadata accumulator"); + if (H5F__accum_flush(f_sh) < 0) { + HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "can't flush metadata accumulator"); + if (!force) + HGOTO_DONE(FAIL); + } /* Check if we need to reset the metadata accumulator information */ if (f_sh->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) { diff --git a/src/H5Fint.c b/src/H5Fint.c index f653e0b71f0..0764bb43beb 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -296,23 +296,19 @@ H5F__set_vol_conn(H5F_t *file) HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get VOL connector info from API context"); /* Sanity check */ - assert(0 != connector_prop.connector_id); - - /* Retrieve the connector for the ID */ - if (NULL == (file->shared->vol_cls = (H5VL_class_t *)H5I_object(connector_prop.connector_id))) - HGOTO_ERROR(H5E_FILE, H5E_BADTYPE, FAIL, "not a VOL connector ID"); + assert(connector_prop.connector); /* Allocate and copy connector info, if it exists */ if (connector_prop.connector_info) - if (H5VL_copy_connector_info(file->shared->vol_cls, &new_connector_info, + if (H5VL_copy_connector_info(connector_prop.connector, &new_connector_info, connector_prop.connector_info) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTCOPY, FAIL, "connector info copy failed"); - /* Cache the connector ID & info for the container */ - file->shared->vol_id = connector_prop.connector_id; + /* Cache the connector & info for the container */ + file->shared->vol_conn = connector_prop.connector; file->shared->vol_info = new_connector_info; - if (H5I_inc_ref(file->shared->vol_id, false) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTINC, FAIL, "incrementing VOL connector ID failed"); + if (H5VL_conn_inc_rc(file->shared->vol_conn) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTINC, FAIL, "incrementing VOL connector refcount failed"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -452,7 +448,7 @@ H5F_get_access_plist(H5F_t *f, bool app_ref) 
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set file driver ID & info"); /* Set the VOL connector property */ - connector_prop.connector_id = f->shared->vol_id; + connector_prop.connector = f->shared->vol_conn; connector_prop.connector_info = f->shared->vol_info; if (H5P_set(new_plist, H5F_ACS_VOL_CONN_NAME, &connector_prop) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set VOL connector ID & info"); @@ -1559,7 +1555,7 @@ H5F__dest(H5F_t *f, bool flush, bool free_on_failure) } /* end if */ /* Destroy other components of the file */ - if (H5F__accum_reset(f->shared, true) < 0) + if (H5F__accum_reset(f->shared, true, true) < 0) /* Push error, but keep going*/ HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing file"); if (H5FO_dest(f) < 0) @@ -1580,14 +1576,13 @@ H5F__dest(H5F_t *f, bool flush, bool free_on_failure) /* Clean up the cached VOL connector ID & info */ if (f->shared->vol_info) - if (H5VL_free_connector_info(f->shared->vol_id, f->shared->vol_info) < 0) + if (H5VL_free_connector_info(f->shared->vol_conn, f->shared->vol_info) < 0) /* Push error, but keep going*/ HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "unable to release VOL connector info object"); - if (f->shared->vol_id > 0) - if (H5I_dec_ref(f->shared->vol_id) < 0) + if (f->shared->vol_conn) + if (H5VL_conn_dec_rc(f->shared->vol_conn) < 0) /* Push error, but keep going*/ - HDONE_ERROR(H5E_FILE, H5E_CANTDEC, FAIL, "can't close VOL connector ID"); - f->shared->vol_cls = NULL; + HDONE_ERROR(H5E_FILE, H5E_CANTDEC, FAIL, "can't close VOL connector"); /* Close the file */ if (H5FD_close(f->shared->lf) < 0) @@ -2242,7 +2237,7 @@ H5F__post_open(H5F_t *f) assert(f); /* Store a vol object in the file struct */ - if (NULL == (f->vol_obj = H5VL_create_object_using_vol_id(H5I_FILE, f, f->shared->vol_id))) + if (NULL == (f->vol_obj = H5VL_new_vol_obj(H5I_FILE, f, f->shared->vol_conn, true))) HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "can't create VOL object"); done: @@ -3723,19 +3718,19 @@ H5F_get_metadata_read_retry_info(H5F_t *file, H5F_retry_info_t *info) herr_t H5F__start_swmr_write(H5F_t *f) { - bool ci_load = false; /* whether MDC ci load requested */ - bool ci_write = false; /* whether MDC CI write requested */ - size_t grp_dset_count = 0; /* # of open objects: groups & datasets */ - size_t nt_attr_count = 0; /* # of opened named datatypes + opened attributes */ - hid_t *obj_ids = NULL; /* List of ids */ - hid_t *obj_apl_ids = NULL; /* List of access property lists */ - H5G_loc_t *obj_glocs = NULL; /* Group location of the object */ - H5O_loc_t *obj_olocs = NULL; /* Object location */ - H5G_name_t *obj_paths = NULL; /* Group hierarchy path */ - size_t u; /* Local index variable */ - bool setup = false; /* Boolean flag to indicate whether SWMR setting is enabled */ - H5VL_t *vol_connector = NULL; /* VOL connector for the file */ - herr_t ret_value = SUCCEED; /* Return value */ + bool ci_load = false; /* whether MDC ci load requested */ + bool ci_write = false; /* whether MDC CI write requested */ + size_t grp_dset_count = 0; /* # of open objects: groups & datasets */ + size_t nt_attr_count = 0; /* # of opened named datatypes + opened attributes */ + hid_t *obj_ids = NULL; /* List of ids */ + hid_t *obj_apl_ids = NULL; /* List of access property lists */ + H5G_loc_t *obj_glocs = NULL; /* Group location of the object */ + H5O_loc_t *obj_olocs = NULL; /* Object location */ + H5G_name_t *obj_paths = NULL; /* Group hierarchy path */ + size_t u; /* Local index variable */ + bool setup = false; 
/* Boolean flag to indicate whether SWMR setting is enabled */ + H5VL_connector_t *vol_connector = NULL; /* VOL connector for the file */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -3816,7 +3811,7 @@ H5F__start_swmr_write(H5F_t *f) HGOTO_ERROR(H5E_FILE, H5E_BADTYPE, FAIL, "invalid object identifier"); /* Get the (top) connector for the ID */ - vol_connector = vol_obj->connector; + vol_connector = H5VL_OBJ_CONNECTOR(vol_obj); } /* end if */ /* Gather information about opened objects (groups, datasets) in the file */ @@ -3893,7 +3888,7 @@ H5F__start_swmr_write(H5F_t *f) } /* end if */ /* Flush and reset the accumulator */ - if (H5F__accum_reset(f->shared, true) < 0) + if (H5F__accum_reset(f->shared, true, false) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator"); /* Turn on SWMR write in shared file open flags */ diff --git a/src/H5Fio.c b/src/H5Fio.c index 2cd8a53ba53..3d50b50fc51 100644 --- a/src/H5Fio.c +++ b/src/H5Fio.c @@ -422,7 +422,7 @@ H5F_flush_tagged_metadata(H5F_t *f, haddr_t tag) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush tagged metadata"); /* Flush and reset the accumulator */ - if (H5F__accum_reset(f->shared, true) < 0) + if (H5F__accum_reset(f->shared, true, false) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator"); /* Flush file buffers to disk. */ diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index f60841ec55f..275fa378f0e 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -311,9 +311,8 @@ struct H5F_shared_t { uint64_t rfic_flags; /* Relaxed file integrity check (RFIC) flags */ /* Cached VOL connector ID & info */ - hid_t vol_id; /* ID of VOL connector for the container */ - const H5VL_class_t *vol_cls; /* Pointer to VOL connector class for the container */ - void *vol_info; /* Copy of VOL connector info for container */ + H5VL_connector_t *vol_conn; /* VOL connector for the container */ + void *vol_info; /* Copy of VOL connector info for container */ /* File space allocation information */ H5F_fspace_strategy_t fs_strategy; /* File space handling strategy */ @@ -445,7 +444,7 @@ H5_DLL herr_t H5F__accum_write(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_t addr const void *buf); H5_DLL herr_t H5F__accum_free(H5F_shared_t *f, H5FD_mem_t type, haddr_t addr, hsize_t size); H5_DLL herr_t H5F__accum_flush(H5F_shared_t *f_sh); -H5_DLL herr_t H5F__accum_reset(H5F_shared_t *f_sh, bool flush); +H5_DLL herr_t H5F__accum_reset(H5F_shared_t *f_sh, bool flush, bool force); /* Shared file list related routines */ H5_DLL herr_t H5F__sfile_add(H5F_shared_t *shared); diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index a4ad311a189..5d9f5011927 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -98,7 +98,6 @@ typedef struct H5F_t H5F_t; #define H5F_NULL_FSM_ADDR(F) ((F)->shared->null_fsm_addr) #define H5F_GET_MIN_DSET_OHDR(F) ((F)->shared->crt_dset_min_ohdr_flag) #define H5F_SET_MIN_DSET_OHDR(F, V) ((F)->shared->crt_dset_min_ohdr_flag = (V)) -#define H5F_VOL_CLS(F) ((F)->shared->vol_cls) #define H5F_VOL_OBJ(F) ((F)->vol_obj) #define H5F_USE_FILE_LOCKING(F) ((F)->shared->use_file_locking) #define H5F_RFIC_FLAGS(F) ((F)->shared->rfic_flags) @@ -163,7 +162,6 @@ typedef struct H5F_t H5F_t; #define H5F_NULL_FSM_ADDR(F) (H5F_get_null_fsm_addr(F)) #define H5F_GET_MIN_DSET_OHDR(F) (H5F_get_min_dset_ohdr(F)) #define H5F_SET_MIN_DSET_OHDR(F, V) (H5F_set_min_dset_ohdr((F), (V))) -#define H5F_VOL_CLS(F) (H5F_get_vol_cls(F)) #define H5F_VOL_OBJ(F) (H5F_get_vol_obj(F)) #define H5F_USE_FILE_LOCKING(F) 
(H5F_get_use_file_locking(F)) #define H5F_RFIC_FLAGS(F) (H5F_get_rfic_flags(F)) @@ -527,10 +525,9 @@ H5_DLL bool H5F_get_point_of_no_return(const H5F_t *f); H5_DLL bool H5F_get_null_fsm_addr(const H5F_t *f); H5_DLL bool H5F_get_min_dset_ohdr(const H5F_t *f); H5_DLL herr_t H5F_set_min_dset_ohdr(H5F_t *f, bool minimize); -H5_DLL const H5VL_class_t *H5F_get_vol_cls(const H5F_t *f); -H5_DLL H5VL_object_t *H5F_get_vol_obj(const H5F_t *f); -H5_DLL bool H5F_get_use_file_locking(const H5F_t *f); -H5_DLL uint64_t H5F_get_rfic_flags(const H5F_t *f); +H5_DLL H5VL_object_t *H5F_get_vol_obj(const H5F_t *f); +H5_DLL bool H5F_get_use_file_locking(const H5F_t *f); +H5_DLL uint64_t H5F_get_rfic_flags(const H5F_t *f); /* Functions than retrieve values set/cached from the superblock/FCPL */ H5_DLL haddr_t H5F_get_base_addr(const H5F_t *f); diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h index 1e5a84cdb09..53dfaa78c31 100644 --- a/src/H5Fpublic.h +++ b/src/H5Fpublic.h @@ -189,10 +189,11 @@ typedef enum H5F_libver_t { H5F_LIBVER_V112 = 3, /**< Use the latest v112 format for storing objects */ H5F_LIBVER_V114 = 4, /**< Use the latest v114 format for storing objects */ H5F_LIBVER_V116 = 5, /**< Use the latest v116 format for storing objects */ + H5F_LIBVER_V118 = 6, /**< Use the latest v118 format for storing objects */ H5F_LIBVER_NBOUNDS /**< Sentinel */ } H5F_libver_t; -#define H5F_LIBVER_LATEST H5F_LIBVER_V116 +#define H5F_LIBVER_LATEST H5F_LIBVER_V118 /** * File space handling strategy diff --git a/src/H5Fquery.c b/src/H5Fquery.c index 63c96d531d0..526795a9870 100644 --- a/src/H5Fquery.c +++ b/src/H5Fquery.c @@ -1280,26 +1280,6 @@ H5F_get_null_fsm_addr(const H5F_t *f) FUNC_LEAVE_NOAPI(f->shared->null_fsm_addr) } /* end H5F_get_null_fsm_addr() */ -/*------------------------------------------------------------------------- - * Function: H5F_get_vol_cls - * - * Purpose: Get the VOL class for the file - * - * Return: VOL class pointer for file, can't fail - * - *------------------------------------------------------------------------- - */ -const H5VL_class_t * -H5F_get_vol_cls(const H5F_t *f) -{ - FUNC_ENTER_NOAPI_NOINIT_NOERR - - assert(f); - assert(f->shared); - - FUNC_LEAVE_NOAPI(f->shared->vol_cls) -} /* end H5F_get_vol_cls */ - /*------------------------------------------------------------------------- * Function: H5F_get_vol_obj * diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index 6c4e7d57ef2..39c92f34b17 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -72,6 +72,7 @@ static const unsigned HDF5_superblock_ver_bounds[] = { HDF5_SUPERBLOCK_VERSION_3, /* H5F_LIBVER_V112 */ HDF5_SUPERBLOCK_VERSION_3, /* H5F_LIBVER_V114 */ HDF5_SUPERBLOCK_VERSION_3, /* H5F_LIBVER_V116 */ + HDF5_SUPERBLOCK_VERSION_3, /* H5F_LIBVER_V118 */ HDF5_SUPERBLOCK_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5G.c b/src/H5G.c index 88d617afc3d..8919788c809 100644 --- a/src/H5G.c +++ b/src/H5G.c @@ -184,7 +184,7 @@ H5G__create_api_common(hid_t loc_id, const char *name, hid_t lcpl_id, hid_t gcpl HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5I_INVALID_HID, "unable to create group"); /* Get an ID for the group */ - if ((ret_value = H5VL_register(H5I_GROUP, grp, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_GROUP, grp, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to get ID for group handle"); done: @@ -266,7 +266,7 @@ H5Gcreate_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to 
the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE9(__func__, "*s*sIui*siiii", app_file, app_func, app_line, loc_id, name, lcpl_id, gcpl_id, gapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -349,7 +349,7 @@ H5Gcreate_anon(hid_t loc_id, hid_t gcpl_id, hid_t gapl_id) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5I_INVALID_HID, "unable to create group"); /* Get an ID for the group */ - if ((ret_value = H5VL_register(H5I_GROUP, grp, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_GROUP, grp, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to get ID for group handle"); done: @@ -399,7 +399,7 @@ H5G__open_api_common(hid_t loc_id, const char *name, hid_t gapl_id, void **token HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open group"); /* Register an ID for the group */ - if ((ret_value = H5VL_register(H5I_GROUP, grp, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_GROUP, grp, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register group"); done: @@ -472,7 +472,7 @@ H5Gopen_async(const char *app_file, const char *app_func, unsigned app_line, hid /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIui*sii", app_file, app_func, app_line, loc_id, name, gapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -624,7 +624,7 @@ H5Gget_info_async(const char *app_file, const char *app_func, unsigned app_line, /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE6(__func__, "*s*sIui*GIi", app_file, app_func, app_line, loc_id, group_info, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -730,7 +730,7 @@ H5Gget_info_by_name_async(const char *app_file, const char *app_func, unsigned a /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE8(__func__, "*s*sIui*s*GIii", app_file, app_func, app_line, loc_id, name, group_info, lapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -841,7 +841,7 @@ H5Gget_info_by_idx_async(const char *app_file, const char *app_func, unsigned ap /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE11(__func__, "*s*sIui*sIiIoh*GIii", app_file, app_func, app_line, loc_id, group_name, idx_type, order, n, group_info, lapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -893,11 +893,11 @@ H5Gclose(hid_t group_id) herr_t H5Gclose_async(const char *app_file, const char 
*app_func, unsigned app_line, hid_t group_id, hid_t es_id) { - H5VL_object_t *vol_obj = NULL; /* Object for loc_id */ - H5VL_t *connector = NULL; /* VOL connector */ - void *token = NULL; /* Request token for async operation */ - void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_object_t *vol_obj = NULL; /* Object for loc_id */ + H5VL_connector_t *connector = NULL; /* VOL connector */ + void *token = NULL; /* Request token for async operation */ + void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -913,7 +913,7 @@ H5Gclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* Increase connector's refcount, so it doesn't get closed if closing * the group closes the file */ - connector = vol_obj->connector; + connector = H5VL_OBJ_CONNECTOR(vol_obj); H5VL_conn_inc_rc(connector); /* Point at token for operation to set up */ @@ -929,7 +929,7 @@ H5Gclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, group_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "can't insert token into event set"); diff --git a/src/H5Gdeprec.c b/src/H5Gdeprec.c index 5f9ad63756e..287f609553e 100644 --- a/src/H5Gdeprec.c +++ b/src/H5Gdeprec.c @@ -214,7 +214,7 @@ H5Gcreate1(hid_t loc_id, const char *name, size_t size_hint) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5I_INVALID_HID, "unable to create group"); /* Get an ID for the group */ - if ((ret_value = H5VL_register(H5I_GROUP, grp, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_GROUP, grp, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register group"); done: @@ -271,7 +271,7 @@ H5Gopen1(hid_t loc_id, const char *name) HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open group"); /* Get an ID for the group */ - if ((ret_value = H5VL_register(H5I_GROUP, grp, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_GROUP, grp, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register group"); done: @@ -312,10 +312,10 @@ H5Glink(hid_t cur_loc_id, H5G_link_t type, const char *cur_name, const char *new if (type == H5L_TYPE_HARD) { H5VL_object_t *vol_obj; /* Object of loc_id */ H5VL_loc_params_t new_loc_params; - H5VL_object_t tmp_vol_obj; /* Temporary object */ /* Set up new location struct */ new_loc_params.type = H5VL_OBJECT_BY_NAME; + new_loc_params.obj_type = H5I_get_type(cur_loc_id); new_loc_params.loc_data.loc_by_name.name = new_name; new_loc_params.loc_data.loc_by_name.lapl_id = H5P_LINK_ACCESS_DEFAULT; @@ -323,20 +323,16 @@ H5Glink(hid_t cur_loc_id, H5G_link_t type, const char *cur_name, const char *new if (NULL == (vol_obj = H5VL_vol_object(cur_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); - /* Construct a temporary VOL object */ - tmp_vol_obj.data = NULL; - tmp_vol_obj.connector = vol_obj->connector; - /* Set up VOL callback arguments */ vol_cb_args.op_type = H5VL_LINK_CREATE_HARD; - vol_cb_args.args.hard.curr_obj 
= vol_obj->data; + vol_cb_args.args.hard.curr_obj = H5VL_OBJ_DATA(vol_obj); vol_cb_args.args.hard.curr_loc_params.type = H5VL_OBJECT_BY_NAME; vol_cb_args.args.hard.curr_loc_params.obj_type = H5I_get_type(cur_loc_id); vol_cb_args.args.hard.curr_loc_params.loc_data.loc_by_name.name = cur_name; vol_cb_args.args.hard.curr_loc_params.loc_data.loc_by_name.lapl_id = H5P_LINK_ACCESS_DEFAULT; /* Create the link through the VOL */ - if (H5VL_link_create(&vol_cb_args, &tmp_vol_obj, &new_loc_params, H5P_LINK_CREATE_DEFAULT, + if (H5VL_link_create(&vol_cb_args, vol_obj, &new_loc_params, H5P_LINK_CREATE_DEFAULT, H5P_LINK_ACCESS_DEFAULT, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to create link"); } /* end if */ @@ -416,7 +412,7 @@ H5Glink2(hid_t cur_loc_id, const char *cur_name, H5G_link_t type, hid_t new_loc_ /* Set up VOL callback arguments */ vol_cb_args.op_type = H5VL_LINK_CREATE_HARD; - vol_cb_args.args.hard.curr_obj = vol_obj1->data; + vol_cb_args.args.hard.curr_obj = H5VL_OBJ_DATA(vol_obj1); vol_cb_args.args.hard.curr_loc_params.type = H5VL_OBJECT_BY_NAME; vol_cb_args.args.hard.curr_loc_params.obj_type = H5I_get_type(cur_loc_id); vol_cb_args.args.hard.curr_loc_params.loc_data.loc_by_name.name = cur_name; diff --git a/src/H5Gloc.c b/src/H5Gloc.c index 897debcb1bd..7bb59ea9e2f 100644 --- a/src/H5Gloc.c +++ b/src/H5Gloc.c @@ -233,7 +233,7 @@ H5G_loc_real(void *obj, H5I_type_t type, H5G_loc_t *loc) case H5I_BADID: case H5I_NTYPES: default: - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid location ID"); + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid location type"); } /* end switch */ done: @@ -608,8 +608,7 @@ H5G__loc_exists_cb(H5G_loc_t H5_ATTR_UNUSED *grp_loc /*in*/, const char H5_ATTR_ * * Purpose: Check if an object actually exists at a location * - * Return: Success: true/false - * Failure: Negative + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h index 38880e787b7..d3dd64e7a7b 100644 --- a/src/H5Gpublic.h +++ b/src/H5Gpublic.h @@ -127,8 +127,8 @@ H5_DLL hid_t H5Gcreate2(hid_t loc_id, const char *name, hid_t lcpl_id, hid_t gcp H5_DLL hid_t H5Gcreate_async(const char *app_file, const char *app_func, unsigned app_line, hid_t loc_id, const char *name, hid_t lcpl_id, hid_t gcpl_id, hid_t gapl_id, hid_t es_id); #else -H5_DLL hid_t H5Gcreate_async(hid_t loc_id, const char *name, hid_t lcpl_id, hid_t gcpl_id, hid_t gapl_id, - hid_t es_id); +H5_DLL hid_t H5Gcreate_async(hid_t loc_id, const char *name, hid_t lcpl_id, hid_t gcpl_id, hid_t gapl_id, + hid_t es_id); #endif /** @@ -222,7 +222,7 @@ H5_DLL hid_t H5Gopen2(hid_t loc_id, const char *name, hid_t gapl_id); H5_DLL hid_t H5Gopen_async(const char *app_file, const char *app_func, unsigned app_line, hid_t loc_id, const char *name, hid_t gapl_id, hid_t es_id); #else -H5_DLL hid_t H5Gopen_async(hid_t loc_id, const char *name, hid_t gapl_id, hid_t es_id); +H5_DLL hid_t H5Gopen_async(hid_t loc_id, const char *name, hid_t gapl_id, hid_t es_id); #endif /** diff --git a/src/H5Idbg.c b/src/H5Idbg.c index 7910b1ce43d..ee963fd1823 100644 --- a/src/H5Idbg.c +++ b/src/H5Idbg.c @@ -78,6 +78,7 @@ H5I__id_dump_cb(void *_item, void H5_ATTR_UNUSED *_key, void *_udata) H5I_type_t type = *(H5I_type_t *)_udata; /* User data */ const H5G_name_t *path = NULL; /* Path to file object */ void *object = NULL; /* Pointer to VOL connector object */ + bool is_native; /* Whether an object 
using the native VOL connector */ FUNC_ENTER_PACKAGE_NOERR @@ -91,8 +92,9 @@ H5I__id_dump_cb(void *_item, void H5_ATTR_UNUSED *_key, void *_udata) case H5I_GROUP: { const H5VL_object_t *vol_obj = (const H5VL_object_t *)info->u.c_object; - object = H5VL_object_data(vol_obj); - if (H5_VOL_NATIVE == vol_obj->connector->cls->value) + is_native = false; + H5VL_object_is_native(vol_obj, &is_native); + if (is_native) path = H5G_nameof(object); break; } @@ -100,8 +102,9 @@ H5I__id_dump_cb(void *_item, void H5_ATTR_UNUSED *_key, void *_udata) case H5I_DATASET: { const H5VL_object_t *vol_obj = (const H5VL_object_t *)info->u.c_object; - object = H5VL_object_data(vol_obj); - if (H5_VOL_NATIVE == vol_obj->connector->cls->value) + is_native = false; + H5VL_object_is_native(vol_obj, &is_native); + if (is_native) path = H5D_nameof(object); break; } diff --git a/src/H5Iint.c b/src/H5Iint.c index 709b9450802..57a6d9f708f 100644 --- a/src/H5Iint.c +++ b/src/H5Iint.c @@ -36,7 +36,7 @@ /****************/ /* Combine a Type number and an ID index into an ID */ -#define H5I_MAKE(g, i) ((((hid_t)(g)&TYPE_MASK) << ID_BITS) | ((hid_t)(i)&ID_MASK)) +#define H5I_MAKE(g, i) ((((hid_t)(g) & TYPE_MASK) << ID_BITS) | ((hid_t)(i) & ID_MASK)) /******************/ /* Local Typedefs */ diff --git a/src/H5L.c b/src/H5L.c index 3616cb75a59..6b1e7fe9947 100644 --- a/src/H5L.c +++ b/src/H5L.c @@ -48,7 +48,7 @@ static herr_t H5L__create_soft_api_common(const char *link_target, hid_t link_lo H5VL_object_t **_vol_obj_ptr); static herr_t H5L__create_hard_api_common(hid_t cur_loc_id, const char *cur_name, hid_t new_loc_id, const char *new_name, hid_t lcpl_id, hid_t lapl_id, - void **token_ptr, H5VL_object_t **_vol_obj_ptr); + void **token_ptr, H5VL_connector_t **conn); static herr_t H5L__delete_api_common(hid_t loc_id, const char *name, hid_t lapl_id, void **token_ptr, H5VL_object_t **_vol_obj_ptr); static herr_t H5L__delete_by_idx_api_common(hid_t loc_id, const char *group_name, H5_index_t idx_type, @@ -94,7 +94,6 @@ H5Lmove(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds H5VL_object_t *vol_obj2 = NULL; /* Object of dst_id */ H5VL_loc_params_t loc_params1; H5VL_loc_params_t loc_params2; - H5VL_object_t tmp_vol_obj; /* Temporary object */ H5I_type_t src_id_type = H5I_BADID, dst_id_type = H5I_BADID; herr_t ret_value = SUCCEED; /* Return value */ @@ -157,31 +156,19 @@ H5Lmove(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds /* Make sure that the VOL connectors are the same */ if (vol_obj1 && vol_obj2) { - int same_connector = 0; + htri_t same_connector; /* Check if both objects are associated with the same VOL connector */ - if (H5VL_cmp_connector_cls(&same_connector, vol_obj1->connector->cls, vol_obj2->connector->cls) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); - if (same_connector) + if ((same_connector = + H5VL_conn_same_class(H5VL_OBJ_CONNECTOR(vol_obj1), H5VL_OBJ_CONNECTOR(vol_obj2))) < 0) + HGOTO_ERROR(H5E_LINK, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); + if (!same_connector) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "Objects are accessed through different VOL connectors and can't be linked"); } - /* Construct a temporary source VOL object */ - if (vol_obj1) { - tmp_vol_obj.connector = vol_obj1->connector; - tmp_vol_obj.data = vol_obj1->data; - } - else { - if (NULL == vol_obj2) - HGOTO_ERROR(H5E_LINK, H5E_BADVALUE, FAIL, "NULL VOL object"); - - tmp_vol_obj.connector = vol_obj2->connector; - tmp_vol_obj.data = NULL; - } - 
/* Move the link */ - if (H5VL_link_move(&tmp_vol_obj, &loc_params1, vol_obj2, &loc_params2, lcpl_id, lapl_id, + if (H5VL_link_move(vol_obj1, &loc_params1, vol_obj2, &loc_params2, lcpl_id, lapl_id, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) HGOTO_ERROR(H5E_LINK, H5E_CANTMOVE, FAIL, "unable to move link"); @@ -208,7 +195,6 @@ H5Lcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds H5VL_loc_params_t loc_params1; H5VL_object_t *vol_obj2 = NULL; /* Object of dst_id */ H5VL_loc_params_t loc_params2; - H5VL_object_t tmp_vol_obj; /* Temporary object */ H5I_type_t src_id_type = H5I_BADID, dst_id_type = H5I_BADID; herr_t ret_value = SUCCEED; /* Return value */ @@ -271,31 +257,19 @@ H5Lcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds /* Make sure that the VOL connectors are the same */ if (vol_obj1 && vol_obj2) { - int same_connector = 0; + htri_t same_connector; /* Check if both objects are associated with the same VOL connector */ - if (H5VL_cmp_connector_cls(&same_connector, vol_obj1->connector->cls, vol_obj2->connector->cls) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); - if (same_connector) + if ((same_connector = + H5VL_conn_same_class(H5VL_OBJ_CONNECTOR(vol_obj1), H5VL_OBJ_CONNECTOR(vol_obj2))) < 0) + HGOTO_ERROR(H5E_LINK, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); + if (!same_connector) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "Objects are accessed through different VOL connectors and can't be linked"); } /* end if */ - /* Construct a temporary source VOL object */ - if (vol_obj1) { - tmp_vol_obj.connector = vol_obj1->connector; - tmp_vol_obj.data = vol_obj1->data; - } /* end if */ - else { - if (NULL == vol_obj2) - HGOTO_ERROR(H5E_LINK, H5E_BADVALUE, FAIL, "NULL VOL object pointer"); - - tmp_vol_obj.connector = vol_obj2->connector; - tmp_vol_obj.data = NULL; - } /* end else */ - /* Copy the link */ - if (H5VL_link_copy(&tmp_vol_obj, &loc_params1, vol_obj2, &loc_params2, lcpl_id, lapl_id, + if (H5VL_link_copy(vol_obj1, &loc_params1, vol_obj2, &loc_params2, lcpl_id, lapl_id, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) HGOTO_ERROR(H5E_LINK, H5E_CANTMOVE, FAIL, "unable to copy link"); @@ -427,7 +401,7 @@ H5Lcreate_soft_async(const char *app_file, const char *app_func, unsigned app_li /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE9(__func__, "*s*sIu*si*siii", app_file, app_func, app_line, link_target, link_loc_id, link_name, lcpl_id, lapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_LINK, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -447,15 +421,11 @@ H5Lcreate_soft_async(const char *app_file, const char *app_func, unsigned app_li */ static herr_t H5L__create_hard_api_common(hid_t cur_loc_id, const char *cur_name, hid_t link_loc_id, const char *link_name, - hid_t lcpl_id, hid_t lapl_id, void **token_ptr, H5VL_object_t **_vol_obj_ptr) + hid_t lcpl_id, hid_t lapl_id, void **token_ptr, H5VL_connector_t **connector) { - H5VL_object_t *curr_vol_obj = NULL; /* Object of cur_loc_id */ - H5VL_object_t *link_vol_obj = NULL; /* Object of link_loc_id */ - H5VL_object_t tmp_vol_obj; /* Temporary object */ - H5VL_object_t *tmp_vol_obj_ptr = &tmp_vol_obj; /* Ptr to temporary object */ - H5VL_object_t **tmp_vol_obj_ptr_ptr = - (_vol_obj_ptr ? 
_vol_obj_ptr : &tmp_vol_obj_ptr); /* Ptr to ptr to temporary object */ - H5VL_link_create_args_t vol_cb_args; /* Arguments to VOL callback */ + H5VL_object_t *curr_vol_obj = NULL; /* Object of cur_loc_id */ + H5VL_object_t *link_vol_obj = NULL; /* Object of link_loc_id */ + H5VL_link_create_args_t vol_cb_args; /* Arguments to VOL callback */ H5VL_loc_params_t link_loc_params; /* Location parameters for link_loc_id object access */ herr_t ret_value = SUCCEED; /* Return value */ @@ -486,12 +456,6 @@ H5L__create_hard_api_common(hid_t cur_loc_id, const char *cur_name, hid_t link_l if (H5CX_set_apl(&lapl_id, H5P_CLS_LACC, cur_loc_id, true) < 0) HGOTO_ERROR(H5E_LINK, H5E_CANTSET, FAIL, "can't set access property list info"); - /* Set up new location struct */ - link_loc_params.type = H5VL_OBJECT_BY_NAME; - link_loc_params.obj_type = H5I_get_type(link_loc_id); - link_loc_params.loc_data.loc_by_name.name = link_name; - link_loc_params.loc_data.loc_by_name.lapl_id = lapl_id; - if (H5L_SAME_LOC != cur_loc_id) /* Get the current location object */ if (NULL == (curr_vol_obj = H5VL_vol_object(cur_loc_id))) @@ -503,34 +467,26 @@ H5L__create_hard_api_common(hid_t cur_loc_id, const char *cur_name, hid_t link_l /* Make sure that the VOL connectors are the same */ if (curr_vol_obj && link_vol_obj) { - int same_connector = 0; + htri_t same_connector; /* Check if both objects are associated with the same VOL connector */ - if (H5VL_cmp_connector_cls(&same_connector, curr_vol_obj->connector->cls, - link_vol_obj->connector->cls) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); - if (same_connector) + if ((same_connector = H5VL_conn_same_class(H5VL_OBJ_CONNECTOR(curr_vol_obj), + H5VL_OBJ_CONNECTOR(link_vol_obj))) < 0) + HGOTO_ERROR(H5E_LINK, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); + if (!same_connector) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "Objects are accessed through different VOL connectors and can't be linked"); } /* end if */ - /* Construct a temporary VOL object */ - if (curr_vol_obj) - (*tmp_vol_obj_ptr_ptr)->connector = curr_vol_obj->connector; - else { - if (NULL == link_vol_obj) - HGOTO_ERROR(H5E_LINK, H5E_BADVALUE, FAIL, "NULL VOL object pointer"); - - (*tmp_vol_obj_ptr_ptr)->connector = link_vol_obj->connector; - } /* end else */ - if (link_vol_obj) - (*tmp_vol_obj_ptr_ptr)->data = link_vol_obj->data; - else - (*tmp_vol_obj_ptr_ptr)->data = NULL; + /* Set up new location struct */ + link_loc_params.type = H5VL_OBJECT_BY_NAME; + link_loc_params.obj_type = (link_vol_obj ? H5I_get_type(link_loc_id) : H5I_get_type(cur_loc_id)); + link_loc_params.loc_data.loc_by_name.name = link_name; + link_loc_params.loc_data.loc_by_name.lapl_id = lapl_id; /* Set up VOL callback arguments */ vol_cb_args.op_type = H5VL_LINK_CREATE_HARD; - vol_cb_args.args.hard.curr_obj = (curr_vol_obj ? curr_vol_obj->data : NULL); + vol_cb_args.args.hard.curr_obj = (curr_vol_obj ? H5VL_OBJ_DATA(curr_vol_obj) : NULL); vol_cb_args.args.hard.curr_loc_params.type = H5VL_OBJECT_BY_NAME; vol_cb_args.args.hard.curr_loc_params.obj_type = (H5L_SAME_LOC != cur_loc_id ? 
H5I_get_type(cur_loc_id) : H5I_BADID); @@ -538,10 +494,14 @@ H5L__create_hard_api_common(hid_t cur_loc_id, const char *cur_name, hid_t link_l vol_cb_args.args.hard.curr_loc_params.loc_data.loc_by_name.lapl_id = lapl_id; /* Create the link */ - if (H5VL_link_create(&vol_cb_args, *tmp_vol_obj_ptr_ptr, &link_loc_params, lcpl_id, lapl_id, - H5P_DATASET_XFER_DEFAULT, token_ptr) < 0) + if (H5VL_link_create(&vol_cb_args, (link_vol_obj ? link_vol_obj : curr_vol_obj), &link_loc_params, + lcpl_id, lapl_id, H5P_DATASET_XFER_DEFAULT, token_ptr) < 0) HGOTO_ERROR(H5E_LINK, H5E_CANTCREATE, FAIL, "unable to create hard link"); + /* Set the connector to use for async operations */ + if (connector) + *connector = (link_vol_obj ? H5VL_OBJ_CONNECTOR(link_vol_obj) : H5VL_OBJ_CONNECTOR(curr_vol_obj)); + done: FUNC_LEAVE_NOAPI(ret_value) } /* H5L__create_hard_api_common() */ @@ -595,11 +555,10 @@ H5Lcreate_hard_async(const char *app_file, const char *app_func, unsigned app_li const char *cur_name, hid_t new_loc_id, const char *new_name, hid_t lcpl_id, hid_t lapl_id, hid_t es_id) { - H5VL_object_t vol_obj; /* Object for loc_id */ - H5VL_object_t *vol_obj_ptr = &vol_obj; /* Pointer to object for loc_id */ - void *token = NULL; /* Request token for async operation */ - void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector = NULL; /* Connector for operation */ + void *token = NULL; /* Request token for async operation */ + void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -609,13 +568,14 @@ H5Lcreate_hard_async(const char *app_file, const char *app_func, unsigned app_li /* Creates a hard link asynchronously */ if (H5L__create_hard_api_common(cur_loc_id, cur_name, new_loc_id, new_name, lcpl_id, lapl_id, token_ptr, - &vol_obj_ptr) < 0) + &connector) < 0) HGOTO_ERROR(H5E_LINK, H5E_CANTCREATE, FAIL, "unable to asynchronously create hard link"); + assert(connector); /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj_ptr->connector, token, + if (H5ES_insert(es_id, connector, token, H5ARG_TRACE10(__func__, "*s*sIui*si*siii", app_file, app_func, app_line, cur_loc_id, cur_name, new_loc_id, new_name, lcpl_id, lapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_LINK, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -897,7 +857,7 @@ H5Ldelete_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIui*sii", app_file, app_func, app_line, loc_id, name, lapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_LINK, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1019,7 +979,7 @@ H5Ldelete_by_idx_async(const char *app_file, const char *app_func, unsigned app_ /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIui*sIiIohii", app_file, app_func, app_line, loc_id, group_name, idx_type, order, n, lapl_id, es_id)) < 0) /* clang-format on */ 
HGOTO_ERROR(H5E_LINK, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1250,7 +1210,7 @@ H5Lexists_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, /* clang-format off */ H5ARG_TRACE8(__func__, "*s*sIui*s*bii", app_file, app_func, app_line, loc_id, name, exists, lapl_id, es_id)) < 0) /* clang-format on */ @@ -1690,7 +1650,7 @@ H5Literate_async(const char *app_file, const char *app_func, unsigned app_line, /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIuiIiIo*hLI*xi", app_file, app_func, app_line, group_id, idx_type, order, idx_p, op, op_data, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_LINK, H5E_CANTINSERT, FAIL, "can't insert token into event set"); diff --git a/src/H5M.c b/src/H5M.c index bb8b4d9882b..c55f881d6d5 100644 --- a/src/H5M.c +++ b/src/H5M.c @@ -253,7 +253,7 @@ H5M__create_api_common(hid_t loc_id, const char *name, hid_t key_type_id, hid_t map = map_args.create.map; /* Get an ID for the map */ - if ((ret_value = H5VL_register(H5I_MAP, map, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_MAP, map, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_MAP, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register map handle"); done: @@ -339,7 +339,7 @@ H5Mcreate_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE11(__func__, "*s*sIui*siiiiii", app_file, app_func, app_line, loc_id, name, key_type_id, val_type_id, lcpl_id, mcpl_id, mapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -420,7 +420,7 @@ H5Mcreate_anon(hid_t loc_id, hid_t key_type_id, hid_t val_type_id, hid_t mcpl_id map = map_args.create.map; /* Get an ID for the map */ - if ((ret_value = H5VL_register(H5I_MAP, map, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_MAP, map, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_MAP, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register map"); done: @@ -484,7 +484,7 @@ H5M__open_api_common(hid_t loc_id, const char *name, hid_t mapl_id, void **token map = map_args.open.map; /* Register an ID for the map */ - if ((ret_value = H5VL_register(H5I_MAP, map, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_MAP, map, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_MAP, H5E_CANTREGISTER, H5I_INVALID_HID, "can't register map ID"); done: @@ -564,7 +564,7 @@ H5Mopen_async(const char *app_file, const char *app_func, unsigned app_line, hid /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIui*sii", app_file, app_func, app_line, loc_id, name, mapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -620,11 +620,11 @@ H5Mclose(hid_t map_id) 
herr_t H5Mclose_async(const char *app_file, const char *app_func, unsigned app_line, hid_t map_id, hid_t es_id) { - void *token = NULL; /* Request token for async operation */ - void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ - H5VL_object_t *vol_obj = NULL; /* VOL object of dset_id */ - H5VL_t *connector = NULL; /* VOL connector */ - herr_t ret_value = SUCCEED; /* Return value */ + void *token = NULL; /* Request token for async operation */ + void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ + H5VL_object_t *vol_obj = NULL; /* VOL object of dset_id */ + H5VL_connector_t *connector = NULL; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -640,7 +640,7 @@ H5Mclose_async(const char *app_file, const char *app_func, unsigned app_line, hi if (H5ES_NONE != es_id) { /* Increase connector's refcount, so it doesn't get closed if closing * the dataset closes the file */ - connector = vol_obj->connector; + connector = H5VL_OBJ_CONNECTOR(vol_obj); H5VL_conn_inc_rc(connector); /* Point at token for operation to set up */ @@ -656,7 +656,7 @@ H5Mclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, map_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_MAP, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1017,7 +1017,7 @@ H5Mput_async(const char *app_file, const char *app_func, unsigned app_line, hid_ /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIuii*xi*xii", app_file, app_func, app_line, map_id, key_mem_type_id, key, val_mem_type_id, value, dxpl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_MAP, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1149,7 +1149,7 @@ H5Mget_async(const char *app_file, const char *app_func, unsigned app_line, hid_ /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIuii*xi*xii", app_file, app_func, app_line, map_id, key_mem_type_id, key, val_mem_type_id, value, dxpl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_MAP, H5E_CANTINSERT, FAIL, "can't insert token into event set"); diff --git a/src/H5Mpublic.h b/src/H5Mpublic.h index 3d8a8c7f7c8..f2d45720bfe 100644 --- a/src/H5Mpublic.h +++ b/src/H5Mpublic.h @@ -238,8 +238,8 @@ H5_DLL hid_t H5Mcreate_async(const char *app_file, const char *app_func, unsigne const char *name, hid_t key_type_id, hid_t val_type_id, hid_t lcpl_id, hid_t mcpl_id, hid_t mapl_id, hid_t es_id); #else -H5_DLL hid_t H5Mcreate_async(hid_t loc_id, const char *name, hid_t key_type_id, hid_t val_type_id, - hid_t lcpl_id, hid_t mcpl_id, hid_t mapl_id, hid_t es_id); +H5_DLL hid_t H5Mcreate_async(hid_t loc_id, const char *name, hid_t key_type_id, hid_t val_type_id, + hid_t lcpl_id, hid_t mcpl_id, hid_t mapl_id, hid_t es_id); #endif /** @@ -300,7 +300,7 @@ H5_DLL hid_t H5Mopen(hid_t loc_id, const char *name, hid_t 
mapl_id); H5_DLL hid_t H5Mopen_async(const char *app_file, const char *app_func, unsigned app_line, hid_t loc_id, const char *name, hid_t mapl_id, hid_t es_id); #else -H5_DLL hid_t H5Mopen_async(hid_t loc_id, const char *name, hid_t mapl_id, hid_t es_id); +H5_DLL hid_t H5Mopen_async(hid_t loc_id, const char *name, hid_t mapl_id, hid_t es_id); #endif /** diff --git a/src/H5O.c b/src/H5O.c index 39887b51a29..14118668d03 100644 --- a/src/H5O.c +++ b/src/H5O.c @@ -119,7 +119,7 @@ H5O__open_api_common(hid_t loc_id, const char *name, hid_t lapl_id, void **token HGOTO_ERROR(H5E_OHDR, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open object"); /* Get an atom for the object */ - if ((ret_value = H5VL_register(opened_type, opened_obj, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(opened_type, opened_obj, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to atomize object handle"); done: @@ -193,7 +193,7 @@ H5Oopen_async(const char *app_file, const char *app_func, unsigned app_line, hid /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIui*sii", app_file, app_func, app_line, loc_id, name, lapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -242,7 +242,7 @@ H5O__open_by_idx_api_common(hid_t loc_id, const char *group_name, H5_index_t idx HGOTO_ERROR(H5E_OHDR, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open object"); /* Get an ID for the object */ - if ((ret_value = H5VL_register(opened_type, opened_obj, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(opened_type, opened_obj, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register object handle"); done: @@ -320,7 +320,7 @@ H5Oopen_by_idx_async(const char *app_file, const char *app_func, unsigned app_li /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIui*sIiIohii", app_file, app_func, app_line, loc_id, group_name, idx_type, order, n, lapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -376,7 +376,7 @@ H5Oopen_by_token(hid_t loc_id, H5O_token_t token) HGOTO_ERROR(H5E_OHDR, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open object"); /* Register the object's ID */ - if ((ret_value = H5VL_register(opened_type, opened_obj, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(opened_type, opened_obj, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register object handle"); done: @@ -573,7 +573,7 @@ H5Ocopy_async(const char *app_file, const char *app_func, unsigned app_line, hid /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIui*si*siii", app_file, app_func, app_line, src_loc_id, src_name, dst_loc_id, dst_name, ocpypl_id, lcpl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); 
@@ -674,7 +674,7 @@ H5Oflush_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, obj_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -775,7 +775,7 @@ H5Orefresh_async(const char *app_file, const char *app_func, unsigned app_line, /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, oid, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -807,7 +807,6 @@ H5Olink(hid_t obj_id, hid_t new_loc_id, const char *new_name, hid_t lcpl_id, hid { H5VL_object_t *vol_obj1 = NULL; /* object of obj_id */ H5VL_object_t *vol_obj2 = NULL; /* object of new_loc_id */ - H5VL_object_t tmp_vol_obj; /* Temporary object */ H5VL_link_create_args_t vol_cb_args; /* Arguments to VOL callback */ H5VL_loc_params_t new_loc_params; herr_t ret_value = SUCCEED; /* Return value */ @@ -855,29 +854,26 @@ H5Olink(hid_t obj_id, hid_t new_loc_id, const char *new_name, hid_t lcpl_id, hid /* Make sure that the VOL connectors are the same */ if (vol_obj1 && vol_obj2) { - int same_connector = 0; + htri_t same_connector; /* Check if both objects are associated with the same VOL connector */ - if (H5VL_cmp_connector_cls(&same_connector, vol_obj1->connector->cls, vol_obj2->connector->cls) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); - if (same_connector) + if ((same_connector = + H5VL_conn_same_class(H5VL_OBJ_CONNECTOR(vol_obj1), H5VL_OBJ_CONNECTOR(vol_obj2))) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); + if (!same_connector) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "Objects are accessed through different VOL connectors and can't be linked"); } /* end if */ - /* Construct a temporary VOL object */ - tmp_vol_obj.data = vol_obj2->data; - tmp_vol_obj.connector = vol_obj1->connector; - /* Set up VOL callback arguments */ vol_cb_args.op_type = H5VL_LINK_CREATE_HARD; - vol_cb_args.args.hard.curr_obj = vol_obj1->data; + vol_cb_args.args.hard.curr_obj = H5VL_OBJ_DATA(vol_obj1); vol_cb_args.args.hard.curr_loc_params.type = H5VL_OBJECT_BY_SELF; vol_cb_args.args.hard.curr_loc_params.obj_type = H5I_get_type(obj_id); /* Create a link to the object */ - if (H5VL_link_create(&vol_cb_args, &tmp_vol_obj, &new_loc_params, lcpl_id, lapl_id, - H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) + if (H5VL_link_create(&vol_cb_args, vol_obj2, &new_loc_params, lcpl_id, lapl_id, H5P_DATASET_XFER_DEFAULT, + H5_REQUEST_NULL) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, FAIL, "unable to create link"); done: @@ -1193,7 +1189,7 @@ H5Oget_info_by_name_async(const char *app_file, const char *app_func, unsigned a /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE9(__func__, "*s*sIui*s*!Iuii", app_file, app_func, app_line, loc_id, name, oinfo, fields, 
lapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -1925,11 +1921,11 @@ H5Oclose(hid_t object_id) herr_t H5Oclose_async(const char *app_file, const char *app_func, unsigned app_line, hid_t object_id, hid_t es_id) { - H5VL_object_t *vol_obj = NULL; /* Object for loc_id */ - H5VL_t *connector = NULL; /* VOL connector */ - void *token = NULL; /* Request token for async operation */ - void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ - herr_t ret_value = SUCCEED; + H5VL_object_t *vol_obj = NULL; /* Object for loc_id */ + H5VL_connector_t *connector = NULL; /* VOL connector */ + void *token = NULL; /* Request token for async operation */ + void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ + herr_t ret_value = SUCCEED; FUNC_ENTER_API(FAIL) @@ -1945,7 +1941,7 @@ H5Oclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* Increase connector's refcount, so it doesn't get closed if closing * this object ID closes the file */ - connector = vol_obj->connector; + connector = H5VL_OBJ_CONNECTOR(vol_obj); H5VL_conn_inc_rc(connector); /* Point at token for operation to set up */ @@ -1961,7 +1957,7 @@ H5Oclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, object_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert token into event set"); diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c index 6299b7583af..bd19cd34f44 100644 --- a/src/H5Ocopy.c +++ b/src/H5Ocopy.c @@ -1464,12 +1464,17 @@ H5O__copy_search_comm_dt(H5F_t *file_src, H5O_t *oh_src, H5O_loc_t *oloc_dst /*i /* Walk through the list of datatype suggestions */ while (suggestion) { + bool exists = false; + /* Find the object */ - if (H5G_loc_find(&dst_root_loc, suggestion->path, &obj_loc /*out*/) < 0) - /* Ignore errors - i.e. 
suggestions not present in - * destination file */ - H5E_clear_stack(); - else + if (H5G_loc_exists(&dst_root_loc, suggestion->path, &exists /*out*/) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTFIND, FAIL, "can't check object's existence"); + + if (exists) { + /* Retrieve the object location info */ + if (H5G_loc_find(&dst_root_loc, suggestion->path, &obj_loc /*out*/) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't retrieve object location"); + /* Check object and add to skip list if appropriate */ if (H5O__copy_search_comm_dt_check(&obj_oloc, &udata) < 0) { if (H5G_loc_free(&obj_loc) < 0) @@ -1477,9 +1482,10 @@ H5O__copy_search_comm_dt(H5F_t *file_src, H5O_t *oh_src, H5O_loc_t *oloc_dst /*i HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't check object"); } /* end if */ - /* Free location */ - if (H5G_loc_free(&obj_loc) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "can't free location"); + /* Free location */ + if (H5G_loc_free(&obj_loc) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "can't free location"); + } /* end if */ /* Advance the suggestion pointer */ suggestion = suggestion->next; diff --git a/src/H5Odeprec.c b/src/H5Odeprec.c index 37a3996c1e6..f56480392e4 100644 --- a/src/H5Odeprec.c +++ b/src/H5Odeprec.c @@ -378,7 +378,7 @@ H5Oopen_by_addr(hid_t loc_id, haddr_t addr) HGOTO_ERROR(H5E_OHDR, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open object"); /* Register the object's ID */ - if ((ret_value = H5VL_register(opened_type, opened_obj, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(opened_type, opened_obj, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register object handle"); done: diff --git a/src/H5Odtype.c b/src/H5Odtype.c index b2e6c8f65be..a359575a646 100644 --- a/src/H5Odtype.c +++ b/src/H5Odtype.c @@ -67,13 +67,13 @@ static herr_t H5O__dtype_debug(H5F_t *f, const void *_mesg, FILE *stream, int in /* If the version is too low, give an error. 
No error if nochange is set * because in that case we are either debugging or deleting the object header */ #define H5O_DTYPE_CHECK_VERSION(DT, VERS, MIN_VERS, IOF, CLASS, ERR) \ - if (((VERS) < (MIN_VERS)) && !(*(IOF)&H5O_DECODEIO_NOCHANGE)) \ + if (((VERS) < (MIN_VERS)) && !(*(IOF) & H5O_DECODEIO_NOCHANGE)) \ HGOTO_ERROR(H5E_DATATYPE, H5E_VERSION, ERR, "incorrect " CLASS " datatype version"); #else /* H5_STRICT_FORMAT_CHECKS */ /* If the version is too low and we are allowed to change the message, upgrade * it and mark the object header as dirty */ #define H5O_DTYPE_CHECK_VERSION(DT, VERS, MIN_VERS, IOF, CLASS, ERR) \ - if (((VERS) < (MIN_VERS)) && !(*(IOF)&H5O_DECODEIO_NOCHANGE)) { \ + if (((VERS) < (MIN_VERS)) && !(*(IOF) & H5O_DECODEIO_NOCHANGE)) { \ (VERS) = (MIN_VERS); \ if (H5T__upgrade_version((DT), (VERS)) < 0) \ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "can't upgrade " CLASS " encoding version"); \ diff --git a/src/H5Ofill.c b/src/H5Ofill.c index 1b56be12580..2b1faac30ca 100644 --- a/src/H5Ofill.c +++ b/src/H5Ofill.c @@ -154,6 +154,7 @@ const unsigned H5O_fill_ver_bounds[] = { H5O_FILL_VERSION_3, /* H5F_LIBVER_V112 */ H5O_FILL_VERSION_3, /* H5F_LIBVER_V114 */ H5O_FILL_VERSION_3, /* H5F_LIBVER_V116 */ + H5O_FILL_VERSION_3, /* H5F_LIBVER_V118 */ H5O_FILL_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Oflush.c b/src/H5Oflush.c index dd11c25e7ca..7bd38f0eb89 100644 --- a/src/H5Oflush.c +++ b/src/H5Oflush.c @@ -194,11 +194,11 @@ H5O_refresh_metadata(H5O_loc_t *oloc, hid_t oid) /* If the file is opened with write access, no need to perform refresh actions. */ if (!(H5F_INTENT(oloc->file) & H5F_ACC_RDWR)) { - H5G_loc_t obj_loc; - H5O_loc_t obj_oloc; - H5G_name_t obj_path; - H5O_shared_t cached_H5O_shared; - H5VL_t *connector = NULL; + H5G_loc_t obj_loc; + H5O_loc_t obj_oloc; + H5G_name_t obj_path; + H5O_shared_t cached_H5O_shared; + H5VL_connector_t *connector = NULL; /* Hold a copy of the object's file pointer, since closing the object will * invalidate the file pointer in the oloc. @@ -227,27 +227,28 @@ H5O_refresh_metadata(H5O_loc_t *oloc, hid_t oid) */ if (NULL == (vol_obj = H5VL_vol_object(oid))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid object identifier"); - connector = vol_obj->connector; + connector = H5VL_OBJ_CONNECTOR(vol_obj); /* Bump the number of references on the VOL connector. * If you don't do this, VDS refreshes can accidentally close the connector. 
*/ - connector->nrefs++; + H5VL_conn_inc_rc(connector); /* Close object & evict its metadata */ if (H5O__refresh_metadata_close(oloc, &obj_loc, oid) < 0) { - connector->nrefs--; + H5VL_conn_dec_rc(connector); HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to refresh object"); } /* Re-open the object, re-fetching its metadata */ if (H5O_refresh_metadata_reopen(oid, H5P_DEFAULT, &obj_loc, connector, false) < 0) { - connector->nrefs--; + H5VL_conn_dec_rc(connector); HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to refresh object"); } /* Restore the number of references on the VOL connector */ - connector->nrefs--; + if (H5VL_conn_dec_rc(connector) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTDEC, FAIL, "can't decrement reference count for connector"); /* Restore important datatype state */ if (H5I_get_type(oid) == H5I_DATATYPE) @@ -347,7 +348,7 @@ H5O__refresh_metadata_close(H5O_loc_t *oloc, H5G_loc_t *obj_loc, hid_t oid) *------------------------------------------------------------------------- */ herr_t -H5O_refresh_metadata_reopen(hid_t oid, hid_t apl_id, H5G_loc_t *obj_loc, H5VL_t *vol_connector, +H5O_refresh_metadata_reopen(hid_t oid, hid_t apl_id, H5G_loc_t *obj_loc, H5VL_connector_t *vol_connector, bool start_swmr) { void *object = NULL; /* Object for this operation */ diff --git a/src/H5Ofsinfo.c b/src/H5Ofsinfo.c index 6e559792530..674e3ff05ff 100644 --- a/src/H5Ofsinfo.c +++ b/src/H5Ofsinfo.c @@ -70,6 +70,7 @@ static const unsigned H5O_fsinfo_ver_bounds[] = { H5O_FSINFO_VERSION_1, /* H5F_LIBVER_V112 */ H5O_FSINFO_VERSION_1, /* H5F_LIBVER_V114 */ H5O_FSINFO_VERSION_1, /* H5F_LIBVER_V116 */ + H5O_FSINFO_VERSION_1, /* H5F_LIBVER_V118 */ H5O_FSINFO_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; #define N_FSINFO_VERSION_BOUNDS H5F_LIBVER_NBOUNDS diff --git a/src/H5Oint.c b/src/H5Oint.c index 2528fb93c68..4256f9cc79f 100644 --- a/src/H5Oint.c +++ b/src/H5Oint.c @@ -75,8 +75,8 @@ static herr_t H5O__obj_type_real(const H5O_t *oh, H5O_type_t *obj_type); static herr_t H5O__get_hdr_info_real(const H5O_t *oh, H5O_hdr_info_t *hdr); static herr_t H5O__free_visit_visited(void *item, void *key, void *operator_data /*in,out*/); static herr_t H5O__visit_cb(hid_t group, const char *name, const H5L_info2_t *linfo, void *_udata); -static const H5O_obj_class_t *H5O__obj_class_real(const H5O_t *oh); -static herr_t H5O__reset_info2(H5O_info2_t *oinfo); +static herr_t H5O__obj_class_real(const H5O_t *oh, const H5O_obj_class_t **cls); +static herr_t H5O__reset_info2(H5O_info2_t *oinfo); /*********************/ /* Package Variables */ @@ -128,6 +128,7 @@ const unsigned H5O_obj_ver_bounds[] = { H5O_VERSION_2, /* H5F_LIBVER_V112 */ H5O_VERSION_2, /* H5F_LIBVER_V114 */ H5O_VERSION_2, /* H5F_LIBVER_V116 */ + H5O_VERSION_2, /* H5F_LIBVER_V118 */ H5O_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; @@ -185,7 +186,7 @@ static const H5O_obj_class_t *const H5O_obj_class_g[] = { * Failure: negative *------------------------------------------------------------------------- */ -herr_t +H5_ATTR_CONST herr_t H5O_init(void) { herr_t ret_value = SUCCEED; /* Return value */ @@ -1594,37 +1595,36 @@ H5O_obj_type(const H5O_loc_t *loc, H5O_type_t *obj_type) /*------------------------------------------------------------------------- * Function: H5O__obj_type_real * - * Purpose: Returns the type of object pointed to by `oh'. + * Purpose: On success, returns the type of object pointed to by `oh' or + * NULL in *obj_type. *obj_type not defined on failure. 
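The H5Oflush.c hunk above stops manipulating connector->nrefs directly and routes all VOL connector reference counting through H5VL_conn_inc_rc()/H5VL_conn_dec_rc(). A condensed sketch of the bracket around the refresh, where refresh_close_step() and reopen_step() are hypothetical placeholders for H5O__refresh_metadata_close() and H5O_refresh_metadata_reopen():

    /* Keep the connector alive across the close/re-open cycle */
    H5VL_conn_inc_rc(connector);

    if (refresh_close_step() < 0) {
        H5VL_conn_dec_rc(connector); /* drop the extra reference on the error path */
        HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to refresh object");
    }

    if (reopen_step() < 0) {
        H5VL_conn_dec_rc(connector);
        HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to refresh object");
    }

    /* Success path: the decrement itself is now checked, instead of poking
     * the reference count field directly */
    if (H5VL_conn_dec_rc(connector) < 0)
        HGOTO_ERROR(H5E_OHDR, H5E_CANTDEC, FAIL, "can't decrement reference count for connector");
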
* - * Return: Success: Non-negative - * Failure: Negative + * Return: Success: Non-negative + * Failure: Negative * *------------------------------------------------------------------------- */ static herr_t H5O__obj_type_real(const H5O_t *oh, H5O_type_t *obj_type) { - const H5O_obj_class_t *obj_class; /* Class of object for header */ + const H5O_obj_class_t *obj_class = NULL; /* Class of object for header */ + herr_t ret_value = SUCCEED; - FUNC_ENTER_PACKAGE_NOERR + FUNC_ENTER_PACKAGE /* Sanity check */ assert(oh); assert(obj_type); /* Look up class for object header */ - if (NULL == (obj_class = H5O__obj_class_real(oh))) { - /* Clear error stack from "failed" class lookup */ - H5E_clear_stack(); + if (H5O__obj_class_real(oh, &obj_class) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to determine object class"); + assert(obj_class); - /* Set type to "unknown" */ - *obj_type = H5O_TYPE_UNKNOWN; - } - else - /* Set object type */ - *obj_type = obj_class->type; + /* Set object type */ + *obj_type = obj_class->type; - FUNC_LEAVE_NOAPI(SUCCEED) +done: + FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__obj_type_real() */ /*------------------------------------------------------------------------- @@ -1650,7 +1650,7 @@ H5O__obj_class(const H5O_loc_t *loc) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header"); /* Test whether entry qualifies as a particular type of object */ - if (NULL == (ret_value = H5O__obj_class_real(oh))) + if (H5O__obj_class_real(oh, &ret_value) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "unable to determine object type"); done: @@ -1663,23 +1663,25 @@ H5O__obj_class(const H5O_loc_t *loc) /*------------------------------------------------------------------------- * Function: H5O__obj_class_real * - * Purpose: Returns the class of object pointed to by `oh'. + * Purpose: On success returns the class of object pointed to by `oh' or + * NULL in *cls. *cls not defined on failure. * - * Return: Success: An object class - * Failure: NULL + * Return: Success: Non-negative + * Failure: Negative * *------------------------------------------------------------------------- */ -static const H5O_obj_class_t * -H5O__obj_class_real(const H5O_t *oh) +static herr_t +H5O__obj_class_real(const H5O_t *oh, const H5O_obj_class_t **cls) { - size_t i; /* Local index variable */ - const H5O_obj_class_t *ret_value = NULL; /* Return value */ + size_t i; /* Local index variable */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE /* Sanity check */ assert(oh); + assert(cls); /* Test whether entry qualifies as a particular type of object */ /* (Note: loop is in reverse order, to test specific objects first) */ @@ -1687,13 +1689,15 @@ H5O__obj_class_real(const H5O_t *oh) htri_t isa; /* Is entry a particular type? 
*/ if ((isa = (H5O_obj_class_g[i - 1]->isa)(oh)) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "unable to determine object type"); - else if (isa) - HGOTO_DONE(H5O_obj_class_g[i - 1]); + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to determine object type"); + else if (isa) { + *cls = H5O_obj_class_g[i - 1]; + break; + } } if (0 == i) - HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "unable to determine object type"); + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to determine object type"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -2071,9 +2075,8 @@ H5O__get_hdr_info_real(const H5O_t *oh, H5O_hdr_info_t *hdr) herr_t H5O_get_info(const H5O_loc_t *loc, H5O_info2_t *oinfo, unsigned fields) { - const H5O_obj_class_t *obj_class; /* Class of object for header */ - H5O_t *oh = NULL; /* Object header */ - herr_t ret_value = SUCCEED; /* Return value */ + H5O_t *oh = NULL; /* Object header */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_TAG(loc->addr, FAIL) @@ -2085,16 +2088,14 @@ H5O_get_info(const H5O_loc_t *loc, H5O_info2_t *oinfo, unsigned fields) if (NULL == (oh = H5O_protect(loc, H5AC__READ_ONLY_FLAG, false))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header"); - /* Get class for object */ - if (NULL == (obj_class = H5O__obj_class_real(oh))) - HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to determine object class"); - /* Reset the object info structure */ if (H5O__reset_info2(oinfo) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't reset object data struct"); /* Get basic information, if requested */ if (fields & H5O_INFO_BASIC) { + H5O_type_t obj_type = H5O_TYPE_UNKNOWN; /* Type of object */ + /* Retrieve the file's fileno */ H5F_GET_FILENO(loc->file, oinfo->fileno); @@ -2102,8 +2103,12 @@ H5O_get_info(const H5O_loc_t *loc, H5O_info2_t *oinfo, unsigned fields) if (H5VL_native_addr_to_token(loc->file, H5I_FILE, loc->addr, &oinfo->token) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "can't serialize address into object token"); - /* Retrieve the type of the object */ - oinfo->type = obj_class->type; + /* Get type of object */ + if (H5O__obj_type_real(oh, &obj_type) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to determine object type"); + + /* Set the type of the object */ + oinfo->type = obj_type; /* Set the object's reference count */ oinfo->rc = oh->nlink; @@ -2177,9 +2182,8 @@ H5O_get_info(const H5O_loc_t *loc, H5O_info2_t *oinfo, unsigned fields) herr_t H5O_get_native_info(const H5O_loc_t *loc, H5O_native_info_t *oinfo, unsigned fields) { - const H5O_obj_class_t *obj_class; /* Class of object for header */ - H5O_t *oh = NULL; /* Object header */ - herr_t ret_value = SUCCEED; /* Return value */ + H5O_t *oh = NULL; /* Object header */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_TAG(loc->addr, FAIL) @@ -2191,10 +2195,6 @@ H5O_get_native_info(const H5O_loc_t *loc, H5O_native_info_t *oinfo, unsigned fie if (NULL == (oh = H5O_protect(loc, H5AC__READ_ONLY_FLAG, false))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header"); - /* Get class for object */ - if (NULL == (obj_class = H5O__obj_class_real(oh))) - HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to determine object class"); - /* Reset the object info structure */ memset(oinfo, 0, sizeof(*oinfo)); @@ -2205,6 +2205,12 @@ H5O_get_native_info(const H5O_loc_t *loc, H5O_native_info_t *oinfo, unsigned fie /* Get B-tree & heap metadata storage size, if requested */ if (fields & H5O_NATIVE_INFO_META_SIZE) { + const 
H5O_obj_class_t *obj_class = NULL; /* Class of object for header */ + + /* Get class for object */ + if (H5O__obj_class_real(oh, &obj_class) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to determine object class"); + /* Check for 'bh_info' callback for this type of object */ if (obj_class->bh_info) /* Call the object's class 'bh_info' routine */ @@ -2371,7 +2377,7 @@ H5O_obj_create(H5F_t *f, H5O_type_t obj_type, void *crt_info, H5G_loc_t *obj_loc * *------------------------------------------------------------------------- */ -haddr_t +H5_ATTR_PURE haddr_t H5O_get_oh_addr(const H5O_t *oh) { /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */ @@ -2388,7 +2394,7 @@ H5O_get_oh_addr(const H5O_t *oh) * *------------------------------------------------------------------------- */ -uint8_t +H5_ATTR_PURE uint8_t H5O_get_oh_flags(const H5O_t *oh) { FUNC_ENTER_NOAPI_NOINIT_NOERR @@ -2405,7 +2411,7 @@ H5O_get_oh_flags(const H5O_t *oh) * *------------------------------------------------------------------------- */ -time_t +H5_ATTR_PURE time_t H5O_get_oh_mtime(const H5O_t *oh) { FUNC_ENTER_NOAPI_NOINIT_NOERR @@ -2419,7 +2425,7 @@ H5O_get_oh_mtime(const H5O_t *oh) * *------------------------------------------------------------------------- */ -uint8_t +H5_ATTR_PURE uint8_t H5O_get_oh_version(const H5O_t *oh) { FUNC_ENTER_NOAPI_NOINIT_NOERR @@ -2837,7 +2843,7 @@ H5O_dec_rc_by_loc(const H5O_loc_t *loc) * *------------------------------------------------------------------------- */ -H5AC_proxy_entry_t * +H5_ATTR_PURE H5AC_proxy_entry_t * H5O_get_proxy(const H5O_t *oh) { FUNC_ENTER_NOAPI_NOINIT_NOERR @@ -2943,7 +2949,7 @@ H5O__reset_info2(H5O_info2_t *oinfo) * *------------------------------------------------------------------------- */ -bool +H5_ATTR_PURE bool H5O_has_chksum(const H5O_t *oh) { FUNC_ENTER_NOAPI_NOINIT_NOERR diff --git a/src/H5Opline.c b/src/H5Opline.c index 19af5442cb7..3e5eb4d7318 100644 --- a/src/H5Opline.c +++ b/src/H5Opline.c @@ -89,6 +89,7 @@ const unsigned H5O_pline_ver_bounds[] = { H5O_PLINE_VERSION_2, /* H5F_LIBVER_V112 */ H5O_PLINE_VERSION_2, /* H5F_LIBVER_V114 */ H5O_PLINE_VERSION_2, /* H5F_LIBVER_V116 */ + H5O_PLINE_VERSION_2, /* H5F_LIBVER_V118 */ H5O_PLINE_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index 3f0ff07cd13..a5eebcf3985 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -980,8 +980,8 @@ H5_DLL herr_t H5O_msg_get_flags(const H5O_loc_t *loc, unsigned type_id, uint8_t H5_DLL herr_t H5O_flush(H5O_loc_t *oloc, hid_t obj_id); H5_DLL herr_t H5O_flush_common(H5O_loc_t *oloc, hid_t obj_id); H5_DLL herr_t H5O_refresh_metadata(H5O_loc_t *oloc, hid_t oid); -H5_DLL herr_t H5O_refresh_metadata_reopen(hid_t oid, hid_t apl_id, H5G_loc_t *obj_loc, H5VL_t *vol_driver, - bool start_swmr); +H5_DLL herr_t H5O_refresh_metadata_reopen(hid_t oid, hid_t apl_id, H5G_loc_t *obj_loc, + H5VL_connector_t *connector, bool start_swmr); /* Object copying routines */ H5_DLL herr_t H5O_copy_header_map(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */, diff --git a/src/H5Opublic.h b/src/H5Opublic.h index 28954d4cb45..498e2595254 100644 --- a/src/H5Opublic.h +++ b/src/H5Opublic.h @@ -276,7 +276,7 @@ H5_DLL hid_t H5Oopen(hid_t loc_id, const char *name, hid_t lapl_id); H5_DLL hid_t H5Oopen_async(const char *app_file, const char *app_func, unsigned app_line, hid_t loc_id, const char *name, hid_t lapl_id, hid_t es_id); #else -H5_DLL hid_t H5Oopen_async(hid_t loc_id, const char *name, hid_t lapl_id, hid_t es_id); +H5_DLL 
hid_t H5Oopen_async(hid_t loc_id, const char *name, hid_t lapl_id, hid_t es_id); #endif /** @@ -365,8 +365,8 @@ H5_DLL hid_t H5Oopen_by_idx_async(const char *app_file, const char *app_func, un const char *group_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, hid_t lapl_id, hid_t es_id); #else -H5_DLL hid_t H5Oopen_by_idx_async(hid_t loc_id, const char *group_name, H5_index_t idx_type, - H5_iter_order_t order, hsize_t n, hid_t lapl_id, hid_t es_id); +H5_DLL hid_t H5Oopen_by_idx_async(hid_t loc_id, const char *group_name, H5_index_t idx_type, + H5_iter_order_t order, hsize_t n, hid_t lapl_id, hid_t es_id); #endif /** diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index 03a65fb7870..f726f63e7b4 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -5748,20 +5748,20 @@ H5Pget_page_buffer_size(hid_t plist_id, size_t *buf_size /*out*/, unsigned *min_ *------------------------------------------------------------------------- */ herr_t -H5P_set_vol(H5P_genplist_t *plist, hid_t vol_id, const void *vol_info) +H5P_set_vol(H5P_genplist_t *plist, H5VL_connector_t *connector, const void *vol_info) { herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) - if (NULL == H5I_object_verify(vol_id, H5I_VOL)) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); + /* Sanity check */ + assert(connector); if (true == H5P_isa_class(plist->plist_id, H5P_FILE_ACCESS)) { H5VL_connector_prop_t vol_prop; /* Property for VOL ID & info */ /* Prepare the VOL connector property */ - vol_prop.connector_id = vol_id; + vol_prop.connector = connector; vol_prop.connector_info = vol_info; /* Set the connector ID & info property */ @@ -5822,19 +5822,20 @@ H5P_reset_vol_class(const H5P_genclass_t *pclass, const H5VL_connector_prop_t *v herr_t H5Pset_vol(hid_t plist_id, hid_t new_vol_id, const void *new_vol_info) { - H5P_genplist_t *plist; /* Property list pointer */ - herr_t ret_value = SUCCEED; /* Return value */ + H5P_genplist_t *plist; /* Property list pointer */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) /* Check arguments */ if (NULL == (plist = (H5P_genplist_t *)H5I_object_verify(plist_id, H5I_GENPROP_LST))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list"); - if (NULL == H5I_object_verify(new_vol_id, H5I_VOL)) + if (NULL == (connector = H5I_object_verify(new_vol_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file VOL ID"); /* Set the VOL */ - if (H5P_set_vol(plist, new_vol_id, new_vol_info) < 0) + if (H5P_set_vol(plist, connector, new_vol_info) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set VOL"); done: @@ -5875,12 +5876,9 @@ H5Pget_vol_id(hid_t plist_id, hid_t *vol_id /*out*/) if (H5P_peek(plist, H5F_ACS_VOL_CONN_NAME, &connector_prop) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get VOL connector info"); - /* Increment the VOL ID's ref count */ - if (H5I_inc_ref(connector_prop.connector_id, true) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTINC, FAIL, "unable to increment ref count on VOL connector ID"); - - /* Set the connector ID to return */ - *vol_id = connector_prop.connector_id; + /* Register an ID for the connector */ + if ((*vol_id = H5VL_conn_register(connector_prop.connector)) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINC, FAIL, "unable to increment ref count on VOL connector"); } /* end if */ else HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list"); @@ -5922,17 +5920,11 @@ H5Pget_vol_info(hid_t plist_id, void **vol_info /*out*/) 
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get VOL connector property"); /* Copy connector info, if it exists */ - if (connector_prop.connector_info) { - H5VL_class_t *connector; /* Pointer to connector */ - - /* Retrieve the connector for the ID */ - if (NULL == (connector = (H5VL_class_t *)H5I_object(connector_prop.connector_id))) - HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a VOL connector ID"); - + if (connector_prop.connector_info) /* Allocate and copy connector info */ - if (H5VL_copy_connector_info(connector, &new_connector_info, connector_prop.connector_info) < 0) + if (H5VL_copy_connector_info(connector_prop.connector, &new_connector_info, + connector_prop.connector_info) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTCOPY, FAIL, "connector info copy failed"); - } /* end if */ /* Set the connector info */ *vol_info = new_connector_info; @@ -5993,7 +5985,7 @@ H5Pget_vol_cap_flags(hid_t plist_id, uint64_t *cap_flags) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get VOL connector property"); /* Query the capability flags */ - if (H5VL_get_cap_flags(&connector_prop, cap_flags) < 0) + if (H5VL_conn_prop_get_cap_flags(&connector_prop, cap_flags) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get VOL connector capability flags"); } else @@ -6021,8 +6013,8 @@ H5P__facc_vol_create(const char H5_ATTR_UNUSED *name, size_t H5_ATTR_UNUSED size FUNC_ENTER_PACKAGE - /* Make copy of the VOL connector */ - if (H5VL_conn_copy((H5VL_connector_prop_t *)value) < 0) + /* Make copy of the VOL connector property */ + if (H5VL_conn_prop_copy((H5VL_connector_prop_t *)value) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTCOPY, FAIL, "can't copy VOL connector"); done: @@ -6050,8 +6042,8 @@ H5P__facc_vol_set(hid_t H5_ATTR_UNUSED prop_id, const char H5_ATTR_UNUSED *name, /* Sanity check */ assert(value); - /* Make copy of VOL connector ID & info */ - if (H5VL_conn_copy((H5VL_connector_prop_t *)value) < 0) + /* Make copy of VOL connector property */ + if (H5VL_conn_prop_copy((H5VL_connector_prop_t *)value) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTCOPY, FAIL, "can't copy VOL connector"); done: @@ -6079,8 +6071,8 @@ H5P__facc_vol_get(hid_t H5_ATTR_UNUSED prop_id, const char H5_ATTR_UNUSED *name, /* Sanity check */ assert(value); - /* Make copy of VOL connector */ - if (H5VL_conn_copy((H5VL_connector_prop_t *)value) < 0) + /* Make copy of VOL connector property */ + if (H5VL_conn_prop_copy((H5VL_connector_prop_t *)value) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTCOPY, FAIL, "can't copy VOL connector"); done: @@ -6105,8 +6097,8 @@ H5P__facc_vol_del(hid_t H5_ATTR_UNUSED prop_id, const char H5_ATTR_UNUSED *name, FUNC_ENTER_PACKAGE - /* Free the VOL connector ID & info */ - if (H5VL_conn_free((H5VL_connector_prop_t *)value) < 0) + /* Free the VOL connector property */ + if (H5VL_conn_prop_free((H5VL_connector_prop_t *)value) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTRELEASE, FAIL, "can't release VOL connector"); done: @@ -6130,8 +6122,8 @@ H5P__facc_vol_copy(const char H5_ATTR_UNUSED *name, size_t H5_ATTR_UNUSED size, FUNC_ENTER_PACKAGE - /* Make copy of VOL connector */ - if (H5VL_conn_copy((H5VL_connector_prop_t *)value) < 0) + /* Make copy of VOL connector property */ + if (H5VL_conn_prop_copy((H5VL_connector_prop_t *)value) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTCOPY, FAIL, "can't copy VOL connector"); done: @@ -6152,48 +6144,25 @@ H5P__facc_vol_copy(const char H5_ATTR_UNUSED *name, size_t H5_ATTR_UNUSED size, *------------------------------------------------------------------------- */ static int -H5P__facc_vol_cmp(const void 
*_info1, const void *_info2, size_t H5_ATTR_UNUSED size) +H5P__facc_vol_cmp(const void *_prop1, const void *_prop2, size_t H5_ATTR_UNUSED size) { - const H5VL_connector_prop_t *info1 = - (const H5VL_connector_prop_t *)_info1; /* Create local aliases for values */ - const H5VL_connector_prop_t *info2 = (const H5VL_connector_prop_t *)_info2; - H5VL_class_t *cls1, *cls2; /* connector class for each property */ - int cmp_value = 0; /* Value from comparison */ + const H5VL_connector_prop_t *prop1 = + (const H5VL_connector_prop_t *)_prop1; /* Create local aliases for values */ + const H5VL_connector_prop_t *prop2 = (const H5VL_connector_prop_t *)_prop2; herr_t H5_ATTR_NDEBUG_UNUSED status; /* Status from info comparison */ int ret_value = 0; /* Return value */ FUNC_ENTER_PACKAGE_NOERR /* Sanity check */ - assert(info1); - assert(info2); + assert(prop1); + assert(prop2); assert(size == sizeof(H5VL_connector_prop_t)); - /* Compare connectors */ - if (NULL == (cls1 = (H5VL_class_t *)H5I_object(info1->connector_id))) - HGOTO_DONE(-1); - if (NULL == (cls2 = (H5VL_class_t *)H5I_object(info2->connector_id))) - HGOTO_DONE(1); - status = H5VL_cmp_connector_cls(&cmp_value, cls1, cls2); + /* Compare properties */ + status = H5VL_conn_prop_cmp(&ret_value, prop1, prop2); assert(status >= 0); - if (cmp_value != 0) - HGOTO_DONE(cmp_value); - /* At this point, we should be able to assume that we are dealing with - * the same connector class struct (or a copies of the same class struct) - */ - - /* Use one of the classes (cls1) info comparison routines to compare the - * info objects - */ - assert(cls1->info_cls.cmp == cls2->info_cls.cmp); - status = H5VL_cmp_connector_info(cls1, &cmp_value, info1->connector_info, info2->connector_info); - assert(status >= 0); - - /* Set return value */ - ret_value = cmp_value; - -done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5P__facc_vol_cmp() */ @@ -6214,8 +6183,8 @@ H5P__facc_vol_close(const char H5_ATTR_UNUSED *name, size_t H5_ATTR_UNUSED size, FUNC_ENTER_PACKAGE - /* Free the VOL connector */ - if (H5VL_conn_free((H5VL_connector_prop_t *)value) < 0) + /* Free the VOL connector property */ + if (H5VL_conn_prop_free((H5VL_connector_prop_t *)value) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTRELEASE, FAIL, "can't release VOL connector"); done: diff --git a/src/H5Pprivate.h b/src/H5Pprivate.h index 36c24579959..0671ed5952e 100644 --- a/src/H5Pprivate.h +++ b/src/H5Pprivate.h @@ -153,6 +153,7 @@ H5_DLLVAR const struct H5P_libclass_t H5P_CLS_OCPY[1]; /* Object copy */ /* Forward declaration of structs used below */ struct H5O_fill_t; struct H5T_t; +struct H5VL_connector_t; struct H5VL_connector_prop_t; /* Package initialization routines */ @@ -188,7 +189,7 @@ H5_DLL herr_t H5P_set_driver_by_name(H5P_genplist_t *plist, const char *dri const char *driver_config, bool app_ref); H5_DLL herr_t H5P_set_driver_by_value(H5P_genplist_t *plist, H5FD_class_value_t driver_value, const char *driver_config, bool app_ref); -H5_DLL herr_t H5P_set_vol(H5P_genplist_t *plist, hid_t vol_id, const void *vol_info); +H5_DLL herr_t H5P_set_vol(H5P_genplist_t *plist, struct H5VL_connector_t *connector, const void *vol_info); H5_DLL herr_t H5P_reset_vol_class(const H5P_genclass_t *pclass, const struct H5VL_connector_prop_t *vol_prop); H5_DLL herr_t H5P_set_vlen_mem_manager(H5P_genplist_t *plist, H5MM_allocate_t alloc_func, void *alloc_info, H5MM_free_t free_func, void *free_info); diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index ff46407a717..273250017a3 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ 
-2499,8 +2499,8 @@ H5_DLL herr_t H5Pset_deflate(hid_t plist_id, unsigned level); * pipeline * \param[in] flags Bit vector specifying certain general properties of * the filter - * \param[in] cd_nelmts Number of elements in \p c_values - * \param[in] c_values Auxiliary data for the filter + * \param[in] cd_nelmts Number of elements in \p cd_values + * \param[in] cd_values Auxiliary data for the filter * * \return \herr_t * @@ -2756,7 +2756,7 @@ H5_DLL herr_t H5Pset_deflate(hid_t plist_id, unsigned level); * */ H5_DLL herr_t H5Pset_filter(hid_t plist_id, H5Z_filter_t filter, unsigned int flags, size_t cd_nelmts, - const unsigned int c_values[]); + const unsigned int cd_values[]); /** * \ingroup OCPL * @@ -5061,6 +5061,36 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * objects created with this setting.
\p low=#H5F_LIBVER_V116
+ * \p high=
+ * \li The library will create objects with the latest format + * versions available to library release 1.16.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release specified + * in the \p high value. + * \li API calls that create objects or features that are available + * to versions of the library greater than version specified in + * \p high will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
\p low=#H5F_LIBVER_V118
+ * \p high=
+ * \li The library will create objects with the latest format + * versions available to library release 1.18.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release specified + * in the \p high value. + * \li API calls that create objects or features that are available + * to versions of the library greater than version specified in + * \p high will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
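With the H5F_LIBVER_V118 bound added to the version-bounds tables and to this documentation, an application can pin new objects to the 1.18.x format range. A minimal usage sketch; the H5F_LIBVER_V118 enumerator is implied by the table entries added in this patch, and the file name and error handling (which omits cleanup on the error paths) are illustrative only:

    #include "hdf5.h"

    static int
    create_v118_file(void)
    {
        hid_t fapl, file;

        if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return -1;

        /* New objects use the 1.18.x object-format versions; releases older
         * than 1.18 may not be able to read them (see the table entry above) */
        if (H5Pset_libver_bounds(fapl, H5F_LIBVER_V118, H5F_LIBVER_LATEST) < 0)
            return -1;

        if ((file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
            return -1;

        /* ... create groups, datasets, committed datatypes, ... */

        H5Fclose(file);
        H5Pclose(fapl);
        return 0;
    }
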
\p low=high * \li The library will create objects with the latest format diff --git a/src/H5R.c b/src/H5R.c index ed589801db9..b532bf7426d 100644 --- a/src/H5R.c +++ b/src/H5R.c @@ -535,7 +535,7 @@ H5R__open_object_api_common(H5R_ref_t *ref_ptr, hid_t rapl_id, hid_t oapl_id, vo HGOTO_ERROR(H5E_REFERENCE, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open object by token"); /* Register object */ - if ((ret_value = H5VL_register(opened_type, opened_obj, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(opened_type, opened_obj, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register object handle"); done: @@ -598,7 +598,7 @@ H5Ropen_object_async(const char *app_file, const char *app_func, unsigned app_li /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIu*Rriii", app_file, app_func, app_line, ref_ptr, rapl_id, oapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -672,7 +672,7 @@ H5R__open_region_api_common(H5R_ref_t *ref_ptr, hid_t rapl_id, hid_t oapl_id, vo HGOTO_ERROR(H5E_REFERENCE, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open object by token"); /* Register object */ - if ((opened_obj_id = H5VL_register(opened_type, opened_obj, (*vol_obj_ptr)->connector, false)) < 0) + if ((opened_obj_id = H5VL_register(opened_type, opened_obj, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), false)) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register object handle"); /* Get VOL object object */ @@ -764,7 +764,7 @@ H5Ropen_region_async(const char *app_file, const char *app_func, unsigned app_li /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIu*Rriii", app_file, app_func, app_line, ref_ptr, rapl_id, oapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -840,7 +840,7 @@ H5R__open_attr_api_common(H5R_ref_t *ref_ptr, hid_t rapl_id, hid_t aapl_id, void HGOTO_ERROR(H5E_REFERENCE, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open object by token"); /* Register object */ - if ((opened_obj_id = H5VL_register(opened_type, opened_obj, (*vol_obj_ptr)->connector, false)) < 0) + if ((opened_obj_id = H5VL_register(opened_type, opened_obj, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), false)) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register object handle"); /* Verify access property list and set up collective metadata if appropriate */ @@ -863,7 +863,7 @@ H5R__open_attr_api_common(H5R_ref_t *ref_ptr, hid_t rapl_id, hid_t aapl_id, void H5R_REF_ATTRNAME((const H5R_ref_priv_t *)ref_ptr)); /* Register the attribute and get an ID for it */ - if ((ret_value = H5VL_register(H5I_ATTR, opened_attr, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_ATTR, opened_attr, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register attribute handle"); done: @@ -932,7 +932,7 @@ H5Ropen_attr_async(const char *app_file, const char *app_func, unsigned app_line /* If a token was created, add the token to the event set */ if (NULL != 
token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIu*Rriii", app_file, app_func, app_line, ref_ptr, rapl_id, aapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) diff --git a/src/H5Rdeprec.c b/src/H5Rdeprec.c index 154d47cb508..921693770a2 100644 --- a/src/H5Rdeprec.c +++ b/src/H5Rdeprec.c @@ -368,7 +368,7 @@ H5Rdereference1(hid_t obj_id, H5R_type_t ref_type, const void *ref) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open object by token"); /* Register object */ - if ((ret_value = H5VL_register(opened_type, opened_obj, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(opened_type, opened_obj, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register object handle"); done: @@ -641,7 +641,7 @@ H5Rdereference2(hid_t obj_id, hid_t oapl_id, H5R_type_t ref_type, const void *re HGOTO_ERROR(H5E_REFERENCE, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open object by token"); /* Register object */ - if ((ret_value = H5VL_register(opened_type, opened_obj, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(opened_type, opened_obj, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register object handle"); done: diff --git a/src/H5Rint.c b/src/H5Rint.c index 35fc78d83e7..cbfa910d247 100644 --- a/src/H5Rint.c +++ b/src/H5Rint.c @@ -484,12 +484,12 @@ H5R__reopen_file(H5R_ref_priv_t *ref, hid_t fapl_id) /* Open the file */ /* (Must open file read-write to allow for object modifications) */ - if (NULL == (new_file = H5VL_file_open(&connector_prop, H5R_REF_FILENAME(ref), H5F_ACC_RDWR, fapl_id, - H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL))) + if (NULL == (new_file = H5VL_file_open(connector_prop.connector, H5R_REF_FILENAME(ref), H5F_ACC_RDWR, + fapl_id, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL))) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTOPENFILE, H5I_INVALID_HID, "unable to open file"); /* Get an ID for the file */ - if ((ret_value = H5VL_register_using_vol_id(H5I_FILE, new_file, connector_prop.connector_id, true)) < 0) + if ((ret_value = H5VL_register(H5I_FILE, new_file, connector_prop.connector, true)) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register file handle"); /* Get the file object */ diff --git a/src/H5S.c b/src/H5S.c index d6611654729..373115661b6 100644 --- a/src/H5S.c +++ b/src/H5S.c @@ -62,6 +62,7 @@ const unsigned H5O_sdspace_ver_bounds[] = { H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V112 */ H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V114 */ H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V116 */ + H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V118 */ H5O_SDSPACE_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5SM.c b/src/H5SM.c index 1c2d4e6caa7..59abd24e679 100644 --- a/src/H5SM.c +++ b/src/H5SM.c @@ -45,6 +45,13 @@ typedef struct H5SM_read_udata_t { void *encoding_buf; /* The encoded message (out) */ } H5SM_read_udata_t; +/* Typedef to increment a reference count in the B-tree */ +typedef struct { + H5SM_mesg_key_t *key; /* IN: key for message being incremented */ + bool found; /* OUT: if message was found */ + H5O_fheap_id_t fheap_id; /* OUT: fheap ID of record */ +} H5SM_incr_ref_opdata_t; + /********************/ /* Local Prototypes */ /********************/ @@ -1141,9 +1148,9 @@ H5SM_try_share(H5F_t *f, H5O_t *open_oh, unsigned 
defer_flags, unsigned type_id, static herr_t H5SM__incr_ref(void *record, void *_op_data, bool *changed) { - H5SM_sohm_t *message = (H5SM_sohm_t *)record; - H5SM_incr_ref_opdata *op_data = (H5SM_incr_ref_opdata *)_op_data; - herr_t ret_value = SUCCEED; + H5SM_sohm_t *message = (H5SM_sohm_t *)record; + H5SM_incr_ref_opdata_t *op_data = (H5SM_incr_ref_opdata_t *)_op_data; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE @@ -1174,9 +1181,9 @@ H5SM__incr_ref(void *record, void *_op_data, bool *changed) /* If we got here, the message has changed */ *changed = true; - /* Check for retrieving the heap ID */ - if (op_data) - op_data->fheap_id = message->u.heap_loc.fheap_id; + /* Set the heap ID and indicate it was found */ + op_data->fheap_id = message->u.heap_loc.fheap_id; + op_data->found = true; done: FUNC_LEAVE_NOAPI(ret_value) @@ -1326,24 +1333,24 @@ H5SM__write_mesg(H5F_t *f, H5O_t *open_oh, H5SM_index_header_t *header, bool def HGOTO_ERROR(H5E_SOHM, H5E_NOTFOUND, FAIL, "can't search for message in index"); } /* end if */ else { - H5SM_incr_ref_opdata op_data; + H5SM_incr_ref_opdata_t op_data; /* Set up callback info */ - op_data.key = &key; + op_data.key = &key; + op_data.found = false; - /* If this returns failure, it means that the message wasn't found. */ - /* If it succeeds, set the heap_id in the shared struct. It will - * return a heap ID, since a message with a reference count greater - * than 1 is always shared in the heap. + /* Set the heap_id in the shared struct, if the message was found. + * It will return a heap ID, since a message with a reference count + * greater than 1 is always shared in the heap. */ - if (H5B2_modify(bt2, &key, H5SM__incr_ref, &op_data) >= 0) { + if (H5B2_modify(bt2, &key, true, H5SM__incr_ref, &op_data) < 0) + HGOTO_ERROR(H5E_SOHM, H5E_CANTMODIFY, FAIL, "B-tree modification failed"); + if (op_data.found) { shared.u.heap_id = op_data.fheap_id; found = true; } /* end if */ - else - H5E_clear_stack(); /*ignore error*/ - } /* end else */ - } /* end else */ + } /* end else */ + } /* end else */ if (found) { /* If the message was found, it's shared in the heap (now). Set up a @@ -1807,7 +1814,7 @@ H5SM__delete_from_index(H5F_t *f, H5O_t *open_oh, H5SM_index_header_t *header, c /* If this returns failure, it means that the message wasn't found. * If it succeeds, a copy of the modified message will be returned. */ - if (H5B2_modify(bt2, &key, H5SM__decr_ref, &message) < 0) + if (H5B2_modify(bt2, &key, false, H5SM__decr_ref, &message) < 0) HGOTO_ERROR(H5E_SOHM, H5E_NOTFOUND, FAIL, "message not in index"); /* Point to the message */ diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h index 4ff4c7da81f..dbeafc15139 100644 --- a/src/H5SMpkg.h +++ b/src/H5SMpkg.h @@ -201,24 +201,6 @@ typedef struct { * heap ID, the heap ID will be 0. */ } H5SM_mesg_key_t; -/* - * Data exchange structure to pass through the fractal heap layer for the - * H5HF_op function when computing a hash value for a message. 
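The H5SM.c/H5SMpkg.h changes above give the increment-ref callback an explicit found flag instead of inferring "message not present" from a failed H5B2_modify() call and clearing the error stack. A trimmed sketch of the new caller-side logic as it appears in H5SM__write_mesg(), with the surrounding setup elided; "message absent" is reported through op_data.found, and a B-tree failure is treated as a genuine error:

    H5SM_incr_ref_opdata_t op_data;

    op_data.key   = &key;
    op_data.found = false;

    if (H5B2_modify(bt2, &key, true, H5SM__incr_ref, &op_data) < 0)
        HGOTO_ERROR(H5E_SOHM, H5E_CANTMODIFY, FAIL, "B-tree modification failed");

    if (op_data.found) {
        shared.u.heap_id = op_data.fheap_id;
        found            = true;
    }
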
- */ -typedef struct { - /* downward (internal) */ - unsigned type_id; /* Message type */ - - /* upward */ - uint32_t hash; /* Hash value */ -} H5SM_fh_ud_gh_t; - -/* Typedef to increment a reference count in the B-tree */ -typedef struct { - H5SM_mesg_key_t *key; /* IN: key for message being incremented */ - H5O_fheap_id_t fheap_id; /* OUT: fheap ID of record */ -} H5SM_incr_ref_opdata; - /* v2 B-tree client callback context */ typedef struct H5SM_bt2_ctx_t { uint8_t sizeof_addr; /* Size of file addresses */ diff --git a/src/H5Shyper.c b/src/H5Shyper.c index bc5d6c3db96..cbf519d3aab 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -242,6 +242,7 @@ static const unsigned H5O_sds_hyper_ver_bounds[] = { H5S_HYPER_VERSION_3, /* H5F_LIBVER_V112 */ H5S_HYPER_VERSION_3, /* H5F_LIBVER_V114 */ H5S_HYPER_VERSION_3, /* H5F_LIBVER_V116 */ + H5S_HYPER_VERSION_3, /* H5F_LIBVER_V118 */ H5S_HYPER_VERSION_3 /* H5F_LIBVER_LATEST */ }; @@ -268,7 +269,7 @@ static const H5S_sel_iter_class_t H5S_sel_iter_hyper[1] = {{ static const hsize_t H5S_hyper_zeros_g[H5S_MAX_RANK] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static const hsize_t H5S_hyper_ones_g[H5S_MAX_RANK] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; /* Declare a free list to manage the H5S_hyper_sel_t struct */ H5FL_DEFINE_STATIC(H5S_hyper_sel_t); diff --git a/src/H5Spoint.c b/src/H5Spoint.c index e102969657b..6853dc3932a 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -127,6 +127,7 @@ static const unsigned H5O_sds_point_ver_bounds[] = { H5S_POINT_VERSION_2, /* H5F_LIBVER_V112 */ H5S_POINT_VERSION_2, /* H5F_LIBVER_V114 */ H5S_POINT_VERSION_2, /* H5F_LIBVER_V116 */ + H5S_POINT_VERSION_2, /* H5F_LIBVER_V118 */ H5S_POINT_VERSION_2 /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5T.c b/src/H5T.c index dcf0a679fed..1728132cf6b 100644 --- a/src/H5T.c +++ b/src/H5T.c @@ -610,6 +610,7 @@ const unsigned H5O_dtype_ver_bounds[] = { H5O_DTYPE_VERSION_4, /* H5F_LIBVER_V112 */ H5O_DTYPE_VERSION_4, /* H5F_LIBVER_V114 */ H5O_DTYPE_VERSION_4, /* H5F_LIBVER_V116 */ + H5O_DTYPE_VERSION_4, /* H5F_LIBVER_V118 */ H5O_DTYPE_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; @@ -2086,12 +2087,12 @@ H5Tclose(hid_t type_id) herr_t H5Tclose_async(const char *app_file, const char *app_func, unsigned app_line, hid_t type_id, hid_t es_id) { - H5T_t *dt; /* Pointer to datatype to close */ - void *token = NULL; /* Request token for async operation */ - void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ - H5VL_object_t *vol_obj = NULL; /* VOL object of dset_id */ - H5VL_t *connector = NULL; /* VOL connector */ - herr_t ret_value = SUCCEED; /* Return value */ + H5T_t *dt; /* Pointer to datatype to close */ + void *token = NULL; /* Request token for async operation */ + void **token_ptr = H5_REQUEST_NULL; /* Pointer to request token for async operation */ + H5VL_object_t *vol_obj = NULL; /* VOL object of dset_id */ + H5VL_connector_t *connector = NULL; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -2109,7 +2110,7 @@ H5Tclose_async(const char *app_file, const char *app_func, unsigned app_line, hi if (H5ES_NONE != es_id) { /* Increase connector's refcount, so it doesn't get closed if closing * the dataset closes the file */ - connector = vol_obj->connector; + connector = H5VL_OBJ_CONNECTOR(vol_obj); H5VL_conn_inc_rc(connector); /* Point at token for 
operation to set up */ @@ -2122,7 +2123,7 @@ H5Tclose_async(const char *app_file, const char *app_func, unsigned app_line, hi /* If a token was created, add the token to the event set */ if (NULL != token) - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE5(__func__, "*s*sIuii", app_file, app_func, app_line, type_id, es_id)) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINSERT, FAIL, "can't insert token into event set"); diff --git a/src/H5TSint.c b/src/H5TSint.c index a2761bb2ad3..c7600eb1fda 100644 --- a/src/H5TSint.c +++ b/src/H5TSint.c @@ -133,10 +133,12 @@ H5TS__init(void) /*------------------------------------------------------------------------- * Function: H5TS_term_package * - * Purpose: Terminate this interface. + * Purpose: Terminate this interface. Clean up global resources shared by + * all threads. * * Note: This function is currently registered via atexit() and is called - * AFTER H5_term_library(). + * AFTER H5_term_library(). H5TS_top_term_package() is called at library + * termination to clean up per-thread resources. * * Return: void * *------------------------------------------------------------------------- */ @@ -151,9 +153,6 @@ H5TS_term_package(void) H5TS_mutex_destroy(&H5TS_api_info_p.api_mutex); H5TS_atomic_destroy_uint(&H5TS_api_info_p.attempt_lock_count); - /* Clean up per-thread library info */ - H5TS__tinfo_term(); - FUNC_LEAVE_NOAPI_VOID } /* end H5TS_term_package() */ @@ -525,16 +524,42 @@ H5TS__tinfo_destroy(void *_tinfo_node) FUNC_ENTER_PACKAGE_NAMECHECK_ONLY if (tinfo_node) { - /* Add thread info node to the free list */ H5TS_mutex_lock(&H5TS_tinfo_mtx_s); + /* Add thread info node to the free list */ tinfo_node->next = H5TS_tinfo_next_free_s; H5TS_tinfo_next_free_s = tinfo_node; + /* Release resources held by error records in thread-local error stack */ + H5E__destroy_stack(&tinfo_node->info.err_stack); H5TS_mutex_unlock(&H5TS_tinfo_mtx_s); } FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY } +/*-------------------------------------------------------------------------- + * Function: H5TS_top_term_package + * + * Purpose: Terminate the thread-local parts of the H5TS interface during library termination.
+ * + * Note: See H5TS_term_package for termination of the thread-global resources + * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +int +H5TS_top_term_package(void) +{ + int n = 0; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* Clean up per-thread library info */ + H5TS__tinfo_term(); + + FUNC_LEAVE_NOAPI(n) +} + /*-------------------------------------------------------------------------- * Function: H5TS__tinfo_term * @@ -562,14 +587,13 @@ H5TS__tinfo_term(void) if (H5_UNLIKELY(H5TS_mutex_unlock(&H5TS_tinfo_mtx_s) < 0)) HGOTO_DONE(FAIL); - /* Release critical section / mutex for modifying the thread info globals */ - if (H5_UNLIKELY(H5TS_mutex_destroy(&H5TS_tinfo_mtx_s) < 0)) - HGOTO_DONE(FAIL); - /* Release key for thread-specific API contexts */ if (H5_UNLIKELY(H5TS_key_delete(H5TS_thrd_info_key_g) < 0)) HGOTO_DONE(FAIL); + /* Release critical section / mutex for modifying the thread info globals */ + if (H5_UNLIKELY(H5TS_mutex_destroy(&H5TS_tinfo_mtx_s) < 0)) + HGOTO_DONE(FAIL); done: FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) } /* end H5TS__tinfo_term() */ diff --git a/src/H5TSprivate.h b/src/H5TSprivate.h index d2be12414d6..64fd567a9bb 100644 --- a/src/H5TSprivate.h +++ b/src/H5TSprivate.h @@ -276,6 +276,7 @@ typedef atomic_flag H5TS_spinlock_t; #ifdef H5_HAVE_THREADSAFE /* Library/thread init/term operations */ H5_DLL void H5TS_term_package(void); +H5_DLL int H5TS_top_term_package(void); /* API locking */ H5_DLL herr_t H5TS_api_lock(void); diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c index 92853c63058..d30ddd00bd4 100644 --- a/src/H5Tcommit.c +++ b/src/H5Tcommit.c @@ -59,7 +59,7 @@ static herr_t H5T__commit_api_common(hid_t loc_id, const char *name, hid_t type_ static hid_t H5T__open_api_common(hid_t loc_id, const char *name, hid_t tapl_id, void **token_ptr, H5VL_object_t **_vol_obj_ptr); static H5T_t *H5T__open_oid(const H5G_loc_t *loc); -static herr_t H5T_destruct_datatype(void *datatype, H5VL_t *vol_connector); +static herr_t H5T_destruct_datatype(void *datatype, H5VL_connector_t *vol_connector); /*********************/ /* Public Variables */ @@ -77,12 +77,6 @@ static herr_t H5T_destruct_datatype(void *datatype, H5VL_t *vol_connector); /* Local Variables */ /*******************/ -/* Declare a free list to manage the H5VL_t struct */ -H5FL_EXTERN(H5VL_t); - -/* Declare a free list to manage the H5VL_object_t struct */ -H5FL_EXTERN(H5VL_object_t); - /*------------------------------------------------------------------------- * Function: H5T__commit_api_common * @@ -142,7 +136,7 @@ H5T__commit_api_common(hid_t loc_id, const char *name, hid_t type_id, hid_t lcpl HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to commit datatype"); /* Set up VOL object */ - if (NULL == (new_obj = H5VL_create_object(data, (*vol_obj_ptr)->connector))) + if (NULL == (new_obj = H5VL_create_object(data, H5VL_OBJ_CONNECTOR(*vol_obj_ptr)))) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, "can't create VOL object for committed datatype"); /* Set the committed type object to the VOL connector pointer in the H5T_t struct */ @@ -210,7 +204,7 @@ H5Tcommit_async(const char *app_file, const char *app_func, unsigned app_line, h /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE10(__func__, "*s*sIui*siiiii", app_file, app_func, app_line, 
loc_id, name, type_id, lcpl_id, tcpl_id, tapl_id, es_id)) < 0) /* clang-format on */ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINSERT, FAIL, "can't insert token into event set"); @@ -358,7 +352,7 @@ H5Tcommit_anon(hid_t loc_id, hid_t type_id, hid_t tcpl_id, hid_t tapl_id) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to commit datatype"); /* Setup VOL object */ - if (NULL == (new_obj = H5VL_create_object(dt, vol_obj->connector))) + if (NULL == (new_obj = H5VL_create_object(dt, H5VL_OBJ_CONNECTOR(vol_obj)))) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, "can't create VOL object for committed datatype"); /* Set the committed type object to the VOL connector pointer in the H5T_t struct */ @@ -657,13 +651,13 @@ H5T__open_api_common(hid_t loc_id, const char *name, hid_t tapl_id, void **token HGOTO_ERROR(H5E_DATATYPE, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open named datatype"); /* Register the type and return the ID */ - if ((ret_value = H5VL_register(H5I_DATATYPE, dt, (*vol_obj_ptr)->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_DATATYPE, dt, H5VL_OBJ_CONNECTOR(*vol_obj_ptr), true)) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register named datatype"); done: /* Cleanup on error */ if (H5I_INVALID_HID == ret_value) - if (dt && H5T_destruct_datatype(dt, (*vol_obj_ptr)->connector) < 0) + if (dt && H5T_destruct_datatype(dt, H5VL_OBJ_CONNECTOR(*vol_obj_ptr)) < 0) HDONE_ERROR(H5E_DATATYPE, H5E_CLOSEERROR, H5I_INVALID_HID, "unable to release datatype"); FUNC_LEAVE_NOAPI(ret_value) @@ -730,7 +724,7 @@ H5Topen_async(const char *app_file, const char *app_func, unsigned app_line, hid /* If a token was created, add the token to the event set */ if (NULL != token) /* clang-format off */ - if (H5ES_insert(es_id, vol_obj->connector, token, + if (H5ES_insert(es_id, H5VL_OBJ_CONNECTOR(vol_obj), token, H5ARG_TRACE7(__func__, "*s*sIui*sii", app_file, app_func, app_line, loc_id, name, tapl_id, es_id)) < 0) { /* clang-format on */ if (H5I_dec_app_ref_always_close(ret_value) < 0) @@ -1276,7 +1270,7 @@ H5T_construct_datatype(H5VL_object_t *vol_obj) *------------------------------------------------------------------------- */ static herr_t -H5T_destruct_datatype(void *datatype, H5VL_t *vol_connector) +H5T_destruct_datatype(void *datatype, H5VL_connector_t *vol_connector) { H5VL_object_t *vol_obj = NULL; herr_t ret_value = FAIL; diff --git a/src/H5Tdeprec.c b/src/H5Tdeprec.c index 3483597346e..2545858f760 100644 --- a/src/H5Tdeprec.c +++ b/src/H5Tdeprec.c @@ -126,7 +126,7 @@ H5Tcommit1(hid_t loc_id, const char *name, hid_t type_id) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to commit datatype"); /* Set up VOL object */ - if (NULL == (new_obj = H5VL_create_object(data, vol_obj->connector))) + if (NULL == (new_obj = H5VL_create_object(data, H5VL_OBJ_CONNECTOR(vol_obj)))) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, "can't create VOL object for committed datatype"); /* Set the committed type object to the VOL connector pointer in the H5T_t struct */ @@ -176,7 +176,7 @@ H5Topen1(hid_t loc_id, const char *name) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTOPENOBJ, H5I_INVALID_HID, "unable to open named datatype"); /* Register the type and return the ID */ - if ((ret_value = H5VL_register(H5I_DATATYPE, dt, vol_obj->connector, true)) < 0) + if ((ret_value = H5VL_register(H5I_DATATYPE, dt, H5VL_OBJ_CONNECTOR(vol_obj), true)) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register named datatype"); done: diff --git a/src/H5Tpublic.h 
b/src/H5Tpublic.h index 57a5b6047c3..522488b0e90 100644 --- a/src/H5Tpublic.h +++ b/src/H5Tpublic.h @@ -1259,7 +1259,7 @@ H5_DLL hid_t H5Topen2(hid_t loc_id, const char *name, hid_t tapl_id); H5_DLL hid_t H5Topen_async(const char *app_file, const char *app_func, unsigned app_line, hid_t loc_id, const char *name, hid_t tapl_id, hid_t es_id); #else -H5_DLL hid_t H5Topen_async(hid_t loc_id, const char *name, hid_t tapl_id, hid_t es_id); +H5_DLL hid_t H5Topen_async(hid_t loc_id, const char *name, hid_t tapl_id, hid_t es_id); #endif /** * \ingroup H5T diff --git a/src/H5VL.c b/src/H5VL.c index 9eb4ab36daf..c3e737a1942 100644 --- a/src/H5VL.c +++ b/src/H5VL.c @@ -35,7 +35,7 @@ #include "H5VLpkg.h" /* Virtual Object Layer */ /* VOL connectors */ -#include "H5VLnative.h" /* Native VOL connector */ +#include "H5VLnative_private.h" /* Native VOL connector */ /****************/ /* Local Macros */ @@ -81,7 +81,8 @@ hid_t H5VLregister_connector(const H5VL_class_t *cls, hid_t vipl_id) { - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) @@ -92,10 +93,20 @@ H5VLregister_connector(const H5VL_class_t *cls, hid_t vipl_id) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "not a VOL initialize property list"); /* Register connector */ - if ((ret_value = H5VL__register_connector_by_class(cls, true, vipl_id)) < 0) + if (NULL == (connector = H5VL__register_connector_by_class(cls, vipl_id))) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL class"); + + /* Get ID for connector */ + if ((ret_value = H5I_register(H5I_VOL, connector, true)) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector"); done: + if (ret_value < 0) + /* Decrement refcount on connector */ + if (connector && H5VL_conn_dec_rc(connector) < 0) + HDONE_ERROR(H5E_VOL, H5E_CANTDEC, H5I_INVALID_HID, + "unable to decrement ref count on VOL connector"); + FUNC_LEAVE_API(ret_value) } /* end H5VLregister_connector() */ @@ -119,7 +130,8 @@ H5VLregister_connector(const H5VL_class_t *cls, hid_t vipl_id) hid_t H5VLregister_connector_by_name(const char *name, hid_t vipl_id) { - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) @@ -137,10 +149,20 @@ H5VLregister_connector_by_name(const char *name, hid_t vipl_id) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "not a VOL initialize property list"); /* Register connector */ - if ((ret_value = H5VL__register_connector_by_name(name, true, vipl_id)) < 0) + if (NULL == (connector = H5VL__register_connector_by_name(name, vipl_id))) HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector"); + /* Get ID for connector */ + if ((ret_value = H5I_register(H5I_VOL, connector, true)) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector ID"); + done: + if (ret_value < 0) + /* Decrement refcount on connector */ + if (connector && H5VL_conn_dec_rc(connector) < 0) + HDONE_ERROR(H5E_VOL, H5E_CANTDEC, H5I_INVALID_HID, + "unable to decrement ref count on VOL connector"); + FUNC_LEAVE_API(ret_value) } /* end H5VLregister_connector_by_name() */ @@ -164,7 +186,8 @@ H5VLregister_connector_by_name(const char *name, hid_t vipl_id) hid_t H5VLregister_connector_by_value(H5VL_class_value_t value, hid_t vipl_id) { - hid_t 
ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) @@ -180,10 +203,20 @@ H5VLregister_connector_by_value(H5VL_class_value_t value, hid_t vipl_id) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "not a VOL initialize property list"); /* Register connector */ - if ((ret_value = H5VL__register_connector_by_value(value, true, vipl_id)) < 0) + if (NULL == (connector = H5VL__register_connector_by_value(value, vipl_id))) HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector"); + /* Get ID for connector */ + if ((ret_value = H5I_register(H5I_VOL, connector, true)) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector ID"); + done: + if (ret_value < 0) + /* Decrement refcount on connector */ + if (connector && H5VL_conn_dec_rc(connector) < 0) + HDONE_ERROR(H5E_VOL, H5E_CANTDEC, H5I_INVALID_HID, + "unable to decrement ref count on VOL connector"); + FUNC_LEAVE_API(ret_value) } /* end H5VLregister_connector_by_value() */ @@ -256,13 +289,18 @@ H5VLis_connector_registered_by_value(H5VL_class_value_t connector_value) hid_t H5VLget_connector_id(hid_t obj_id) { - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_object_t *vol_obj = NULL; + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) - /* Get connector ID */ - if ((ret_value = H5VL__get_connector_id(obj_id, true)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, H5I_INVALID_HID, "can't get VOL id"); + /* Get the underlying VOL object for the object ID */ + if (NULL == (vol_obj = H5VL_vol_object(obj_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); + + /* Register an ID for the connector */ + if ((ret_value = H5VL_conn_register(vol_obj->connector)) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "can't get VOL ID"); done: FUNC_LEAVE_API(ret_value) @@ -285,15 +323,26 @@ H5VLget_connector_id(hid_t obj_id) hid_t H5VLget_connector_id_by_name(const char *name) { - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) /* Get connector ID with this name */ - if ((ret_value = H5VL__get_connector_id_by_name(name, true)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, H5I_INVALID_HID, "can't get VOL id"); + if (NULL == (connector = H5VL__get_connector_by_name(name))) + HGOTO_ERROR(H5E_VOL, H5E_CANTGET, H5I_INVALID_HID, "can't get VOL connector"); + + /* Get ID for connector */ + if ((ret_value = H5I_register(H5I_VOL, connector, true)) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector ID"); done: + if (ret_value < 0) + /* Decrement refcount on connector */ + if (connector && H5VL_conn_dec_rc(connector) < 0) + HDONE_ERROR(H5E_VOL, H5E_CANTDEC, H5I_INVALID_HID, + "unable to decrement ref count on VOL connector"); + FUNC_LEAVE_API(ret_value) } /* end H5VLget_connector_id_by_name() */ @@ -314,77 +363,28 @@ H5VLget_connector_id_by_name(const char *name) hid_t H5VLget_connector_id_by_value(H5VL_class_value_t connector_value) { - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) /* Get connector ID with this value */ - if ((ret_value = 
H5VL__get_connector_id_by_value(connector_value, true)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, H5I_INVALID_HID, "can't get VOL id"); - -done: - FUNC_LEAVE_API(ret_value) -} /* end H5VLget_connector_id_by_value() */ + if (NULL == (connector = H5VL__get_connector_by_value(connector_value))) + HGOTO_ERROR(H5E_VOL, H5E_CANTGET, H5I_INVALID_HID, "can't get VOL connector"); -/*------------------------------------------------------------------------- - * Function: H5VLpeek_connector_id_by_name - * - * Purpose: Retrieves the ID for a registered VOL connector. - * - * Return: A valid VOL connector ID if a connector by that name has - * been registered. This ID is *not* owned by the caller and - * H5VLclose() should not be called. Intended for use by VOL - * connectors to find their own ID. - * - * H5I_INVALID_HID on error or if a VOL connector of that - * name has not been registered. - * - *------------------------------------------------------------------------- - */ -hid_t -H5VLpeek_connector_id_by_name(const char *name) -{ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ - - FUNC_ENTER_API(H5I_INVALID_HID) - - /* Get connector ID with this name */ - if ((ret_value = H5VL__peek_connector_id_by_name(name)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, H5I_INVALID_HID, "can't get VOL id"); + /* Get ID for connector */ + if ((ret_value = H5I_register(H5I_VOL, connector, true)) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector ID"); done: - FUNC_LEAVE_API(ret_value) -} /* end H5VLpeek_connector_id_by_name() */ + if (ret_value < 0) + /* Decrement refcount on connector */ + if (connector && H5VL_conn_dec_rc(connector) < 0) + HDONE_ERROR(H5E_VOL, H5E_CANTDEC, H5I_INVALID_HID, + "unable to decrement ref count on VOL connector"); -/*------------------------------------------------------------------------- - * Function: H5VLpeek_connector_id_by_value - * - * Purpose: Retrieves the ID for a registered VOL connector. - * - * Return: A valid VOL connector ID if a connector with that value - * has been registered. This ID is *not* owned by the caller - * and H5VLclose() should not be called. Intended for use by - * VOL connectors to find their own ID. - * - * H5I_INVALID_HID on error or if a VOL connector with that - * value has not been registered. 
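/*
 * Illustrative sketch (not part of this patch): application-side use of the
 * connector-ID lookups reworked above.  Each successful
 * H5VLget_connector_id_by_*() call returns a new VOL connector ID that the
 * caller must release with H5VLclose().  The name "native" and the value 0
 * are assumed here to identify the built-in native connector.
 */
#include "hdf5.h"

static herr_t
lookup_native_connector(void)
{
    hid_t by_name  = H5I_INVALID_HID;
    hid_t by_value = H5I_INVALID_HID;

    if ((by_name = H5VLget_connector_id_by_name("native")) < 0)
        return -1;
    if ((by_value = H5VLget_connector_id_by_value((H5VL_class_value_t)0)) < 0) {
        H5VLclose(by_name);
        return -1;
    }

    /* ... use the IDs, e.g. with H5Pset_vol() on a file access property list ... */

    H5VLclose(by_value);
    H5VLclose(by_name);
    return 0;
}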
- * - *------------------------------------------------------------------------- - */ -hid_t -H5VLpeek_connector_id_by_value(H5VL_class_value_t value) -{ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ - - FUNC_ENTER_API(H5I_INVALID_HID) - - /* Get connector ID with this value */ - if ((ret_value = H5VL__peek_connector_id_by_value(value)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, H5I_INVALID_HID, "can't get VOL id"); - -done: FUNC_LEAVE_API(ret_value) -} /* end H5VLpeek_connector_id_by_value() */ +} /* end H5VLget_connector_id_by_value() */ /*------------------------------------------------------------------------- * Function: H5VLget_connector_name @@ -406,13 +406,17 @@ H5VLpeek_connector_id_by_value(H5VL_class_value_t value) ssize_t H5VLget_connector_name(hid_t obj_id, char *name /*out*/, size_t size) { - ssize_t ret_value = -1; + H5VL_object_t *vol_obj; + ssize_t ret_value = -1; FUNC_ENTER_API(FAIL) + /* Get the object pointer */ + if (NULL == (vol_obj = H5VL_vol_object(obj_id))) + HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, FAIL, "invalid VOL identifier"); + /* Call internal routine */ - if ((ret_value = H5VL__get_connector_name(obj_id, name, size)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "Can't get connector name"); + ret_value = (ssize_t)H5VL__get_connector_name(vol_obj->connector, name, size); done: FUNC_LEAVE_API(ret_value) @@ -462,7 +466,6 @@ H5VLclose(hid_t vol_id) * to do so are considered an error. * * Return: Success: Non-negative - * * Failure: Negative * *------------------------------------------------------------------------- @@ -470,30 +473,28 @@ H5VLclose(hid_t vol_id) herr_t H5VLunregister_connector(hid_t vol_id) { - hid_t native_id = H5I_INVALID_HID; - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *native, *connector; + int cmp_value; /* Comparison result */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) /* Check arguments */ - if (NULL == H5I_object_verify(vol_id, H5I_VOL)) + if (NULL == (connector = H5I_object_verify(vol_id, H5I_VOL))) HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* For the time being, we disallow unregistering the native VOL connector */ - if (H5I_INVALID_HID == (native_id = H5VL__get_connector_id_by_name(H5VL_NATIVE_NAME, false))) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "unable to find the native VOL connector ID"); - if (vol_id == native_id) + native = H5VL_NATIVE_conn_g; + if (H5VL_cmp_connector_cls(&cmp_value, connector->cls, native->cls) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); + if (0 == cmp_value) HGOTO_ERROR(H5E_VOL, H5E_BADVALUE, FAIL, "unregistering the native VOL connector is not allowed"); - /* The H5VL_class_t struct will be freed by this function */ + /* The H5VL_connector_t struct will be freed by this function */ if (H5I_dec_app_ref(vol_id) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTDEC, FAIL, "unable to unregister VOL connector"); done: - if (native_id != H5I_INVALID_HID) - if (H5I_dec_ref(native_id) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTDEC, FAIL, "unable to decrement count on native_id"); - FUNC_LEAVE_API(ret_value) } /* end H5VLunregister_connector() */ @@ -514,19 +515,19 @@ H5VLunregister_connector(hid_t vol_id) herr_t H5VLcmp_connector_cls(int *cmp, hid_t connector_id1, hid_t connector_id2) { - H5VL_class_t *cls1, *cls2; /* connectors for IDs */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *conn1, *conn2; /* Connectors for IDs */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) /* 
Check args and get class pointers */ - if (NULL == (cls1 = (H5VL_class_t *)H5I_object_verify(connector_id1, H5I_VOL))) + if (NULL == (conn1 = H5I_object_verify(connector_id1, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); - if (NULL == (cls2 = (H5VL_class_t *)H5I_object_verify(connector_id2, H5I_VOL))) + if (NULL == (conn2 = H5I_object_verify(connector_id2, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Compare the two VOL connector classes */ - if (H5VL_cmp_connector_cls(cmp, cls1, cls2) < 0) + if (H5VL_cmp_connector_cls(cmp, conn1->cls, conn2->cls) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); done: @@ -674,25 +675,28 @@ H5VLobject_is_native(hid_t obj_id, hbool_t *is_native) hid_t H5VLget_file_type(void *file_obj, hid_t connector_id, hid_t dtype_id) { - H5T_t *dtype; /* unregistered type */ - H5T_t *file_type = NULL; /* copied file type */ - hid_t file_type_id = -1; /* copied file type id */ - H5VL_object_t *file_vol_obj = NULL; /* VOL object for file */ - hid_t ret_value = -1; /* Return value */ + H5T_t *dtype; /* unregistered type */ + H5T_t *file_type = NULL; /* copied file type */ + hid_t file_type_id = -1; /* copied file type id */ + H5VL_connector_t *connector; /* VOL connector */ + H5VL_object_t *file_vol_obj = NULL; /* VOL object for file */ + hid_t ret_value = -1; /* Return value */ FUNC_ENTER_API(FAIL) /* Check args */ if (!file_obj) HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "no file object supplied"); - if (NULL == (dtype = (H5T_t *)H5I_object_verify(dtype_id, H5I_DATATYPE))) + if (NULL == (dtype = H5I_object_verify(dtype_id, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type"); + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file VOL ID"); /* Create VOL object for file if necessary (force_conv will be true if and * only if file needs to be passed to H5T_set_loc) */ - if (H5T_GET_FORCE_CONV(dtype) && - (NULL == (file_vol_obj = H5VL_create_object_using_vol_id(H5I_FILE, file_obj, connector_id)))) - HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, FAIL, "can't create VOL object"); + if (H5T_GET_FORCE_CONV(dtype)) + if (NULL == (file_vol_obj = H5VL_new_vol_obj(H5I_FILE, file_obj, connector, true))) + HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, FAIL, "can't create VOL object"); /* Copy the datatype */ if (NULL == (file_type = H5T_copy(dtype, H5T_COPY_TRANSIENT))) diff --git a/src/H5VLcallback.c b/src/H5VLcallback.c index 0e696088ebf..497e53908d5 100644 --- a/src/H5VLcallback.c +++ b/src/H5VLcallback.c @@ -50,10 +50,9 @@ * VOL connector to open a given file with. 
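/*
 * Illustrative sketch (not part of this patch): querying which connector backs
 * an open object, using the public H5VLget_connector_id(),
 * H5VLget_connector_name() and H5VLcmp_connector_cls() routines touched above.
 * `file_id` is a hypothetical open file handle.
 */
#include <stdio.h>
#include "hdf5.h"

static void
report_connector(hid_t file_id)
{
    char  name[64];
    hid_t conn_id   = H5I_INVALID_HID;
    hid_t native_id = H5I_INVALID_HID;
    int   cmp       = -1;

    if ((conn_id = H5VLget_connector_id(file_id)) < 0)
        return;
    if (H5VLget_connector_name(file_id, name, sizeof(name)) > 0)
        printf("file is backed by VOL connector '%s'\n", name);

    /* Compare against the native connector's class */
    if ((native_id = H5VLget_connector_id_by_name("native")) >= 0) {
        if (H5VLcmp_connector_cls(&cmp, conn_id, native_id) >= 0 && 0 == cmp)
            printf("the native VOL connector is in use\n");
        H5VLclose(native_id);
    }

    H5VLclose(conn_id);
}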
*/ typedef struct H5VL_file_open_find_connector_t { - const char *filename; - const H5VL_class_t *cls; - H5VL_connector_prop_t *connector_prop; - hid_t fapl_id; + const char *filename; + const H5VL_class_t *cls; + hid_t fapl_id; } H5VL_file_open_find_connector_t; /* Typedef for common callback form of registered optional operations */ @@ -67,6 +66,12 @@ typedef herr_t (*H5VL_reg_opt_oper_t)(void *obj, const H5VL_class_t *cls, H5VL_o /********************/ /* Local Prototypes */ /********************/ +/* Helper routines */ +static herr_t H5VL__common_optional_op(hid_t id, H5I_type_t id_type, H5VL_reg_opt_oper_t reg_opt_op, + H5VL_optional_args_t *args, hid_t dxpl_id, void **req, + H5VL_object_t **_vol_obj_ptr); + +/* VOL connector callback equivalents */ static void *H5VL__attr_create(void *obj, const H5VL_loc_params_t *loc_params, const H5VL_class_t *cls, const char *name, hid_t type_id, hid_t space_id, hid_t acpl_id, hid_t aapl_id, hid_t dxpl_id, void **req); @@ -218,17 +223,17 @@ static herr_t H5VL__optional(void *obj, const H5VL_class_t *cls, H5VL_optional_a herr_t H5VLinitialize(hid_t connector_id, hid_t vipl_id) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT /* Check args */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Invoke class' callback, if there is one */ - if (cls->initialize && cls->initialize(vipl_id) < 0) + if (connector->cls->initialize && connector->cls->initialize(vipl_id) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCLOSEOBJ, FAIL, "VOL connector did not initialize"); done: @@ -248,17 +253,17 @@ H5VLinitialize(hid_t connector_id, hid_t vipl_id) herr_t H5VLterminate(hid_t connector_id) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT /* Check args */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Invoke class' callback, if there is one */ - if (cls->terminate && cls->terminate() < 0) + if (connector->cls->terminate && connector->cls->terminate() < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCLOSEOBJ, FAIL, "VOL connector did not terminate cleanly"); done: @@ -278,18 +283,18 @@ H5VLterminate(hid_t connector_id) herr_t H5VLget_cap_flags(hid_t connector_id, uint64_t *cap_flags /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT /* Check args */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Retrieve capability flags */ if (cap_flags) - *cap_flags = cls->cap_flags; + *cap_flags = connector->cls->cap_flags; done: FUNC_LEAVE_API_NOINIT(ret_value) @@ -308,18 +313,18 @@ H5VLget_cap_flags(hid_t connector_id, uint64_t *cap_flags /*out*/) herr_t 
H5VLget_value(hid_t connector_id, H5VL_class_value_t *value /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT /* Check args */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Retrieve connector value */ if (value) - *value = cls->value; + *value = connector->cls->value; done: FUNC_LEAVE_API_NOINIT(ret_value) @@ -380,7 +385,7 @@ H5VL__common_optional_op(hid_t id, H5I_type_t id_type, H5VL_reg_opt_oper_t reg_o *------------------------------------------------------------------------- */ herr_t -H5VL_copy_connector_info(const H5VL_class_t *connector, void **dst_info, const void *src_info) +H5VL_copy_connector_info(const H5VL_connector_t *connector, void **dst_info, const void *src_info) { void *new_connector_info = NULL; /* Copy of connector info */ herr_t ret_value = SUCCEED; /* Return value */ @@ -393,14 +398,14 @@ H5VL_copy_connector_info(const H5VL_class_t *connector, void **dst_info, const v /* Check for actual source info */ if (src_info) { /* Allow the connector to copy or do it ourselves */ - if (connector->info_cls.copy) { - if (NULL == (new_connector_info = (connector->info_cls.copy)(src_info))) + if (connector->cls->info_cls.copy) { + if (NULL == (new_connector_info = (connector->cls->info_cls.copy)(src_info))) HGOTO_ERROR(H5E_VOL, H5E_CANTCOPY, FAIL, "connector info copy callback failed"); } /* end if */ - else if (connector->info_cls.size > 0) { - if (NULL == (new_connector_info = H5MM_malloc(connector->info_cls.size))) + else if (connector->cls->info_cls.size > 0) { + if (NULL == (new_connector_info = H5MM_malloc(connector->cls->info_cls.size))) HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, FAIL, "connector info allocation failed"); - H5MM_memcpy(new_connector_info, src_info, connector->info_cls.size); + H5MM_memcpy(new_connector_info, src_info, connector->cls->info_cls.size); } /* end else-if */ else HGOTO_ERROR(H5E_VOL, H5E_UNSUPPORTED, FAIL, "no way to copy connector info"); @@ -426,17 +431,17 @@ H5VL_copy_connector_info(const H5VL_class_t *connector, void **dst_info, const v herr_t H5VLcopy_connector_info(hid_t connector_id, void **dst_vol_info, void *src_vol_info) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Check args and get VOL connector */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Copy the VOL connector's info object */ - if (H5VL_copy_connector_info(cls, dst_vol_info, src_vol_info) < 0) + if (H5VL_copy_connector_info(connector, dst_vol_info, src_vol_info) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCOPY, FAIL, "unable to copy VOL connector info object"); done: @@ -457,7 +462,8 @@ H5VLcopy_connector_info(hid_t connector_id, void **dst_vol_info, void *src_vol_i *------------------------------------------------------------------------- */ herr_t -H5VL_cmp_connector_info(const H5VL_class_t *connector, int *cmp_value, const void *info1, 
const void *info2) +H5VL_cmp_connector_info(const H5VL_connector_t *connector, int *cmp_value, const void *info1, + const void *info2) { herr_t ret_value = SUCCEED; /* Return value */ @@ -485,13 +491,13 @@ H5VL_cmp_connector_info(const H5VL_class_t *connector, int *cmp_value, const voi * if there is a callback, otherwise just compare the info objects as * memory buffers */ - if (connector->info_cls.cmp) { - if ((connector->info_cls.cmp)(cmp_value, info1, info2) < 0) + if (connector->cls->info_cls.cmp) { + if ((connector->cls->info_cls.cmp)(cmp_value, info1, info2) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "can't compare connector info"); } /* end if */ else { - assert(connector->info_cls.size > 0); - *cmp_value = memcmp(info1, info2, connector->info_cls.size); + assert(connector->cls->info_cls.size > 0); + *cmp_value = memcmp(info1, info2, connector->cls->info_cls.size); } /* end else */ done: @@ -516,18 +522,18 @@ H5VL_cmp_connector_info(const H5VL_class_t *connector, int *cmp_value, const voi herr_t H5VLcmp_connector_info(int *cmp, hid_t connector_id, const void *info1, const void *info2) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) - /* Check args and get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Check args and get VOL connector */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Compare the two VOL connector info objects */ if (cmp) - H5VL_cmp_connector_info(cls, cmp, info1, info2); + H5VL_cmp_connector_info(connector, cmp, info1, info2); done: FUNC_LEAVE_API(ret_value) @@ -544,26 +550,21 @@ H5VLcmp_connector_info(int *cmp, hid_t connector_id, const void *info1, const vo *------------------------------------------------------------------------- */ herr_t -H5VL_free_connector_info(hid_t connector_id, const void *info) +H5VL_free_connector_info(const H5VL_connector_t *connector, const void *info) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) /* Sanity check */ - assert(connector_id > 0); - - /* Check args and get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) - HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, FAIL, "not a VOL connector ID"); + assert(connector); /* Only free info object, if it's non-NULL */ if (info) { /* Allow the connector to free info or do it ourselves */ - if (cls->info_cls.free) { + if (connector->cls->info_cls.free) { /* Cast through uintptr_t to de-const memory */ - if ((cls->info_cls.free)((void *)(uintptr_t)info) < 0) + if ((connector->cls->info_cls.free)((void *)(uintptr_t)info) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "connector info free request failed"); } else @@ -587,12 +588,17 @@ H5VL_free_connector_info(hid_t connector_id, const void *info) herr_t H5VLfree_connector_info(hid_t connector_id, void *info) { - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT + /* Check args and get VOL connector */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector 
ID"); + /* Free the VOL connector info object */ - if (H5VL_free_connector_info(connector_id, info) < 0) + if (H5VL_free_connector_info(connector, info) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "unable to release VOL connector info object"); done: @@ -618,15 +624,15 @@ H5VLconnector_info_to_str(const void *info, hid_t connector_id, char **str) /* Only serialize info object, if it's non-NULL */ if (info) { - H5VL_class_t *cls; /* VOL connector's class struct */ + H5VL_connector_t *connector; /* VOL connector */ - /* Check args and get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Check args and get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Allow the connector to serialize info */ - if (cls->info_cls.to_str) { - if ((cls->info_cls.to_str)(info, str) < 0) + if (connector->cls->info_cls.to_str) { + if ((connector->cls->info_cls.to_str)(info, str) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTSERIALIZE, FAIL, "can't serialize connector info"); } /* end if */ else @@ -639,6 +645,40 @@ H5VLconnector_info_to_str(const void *info, hid_t connector_id, char **str) FUNC_LEAVE_API_NOINIT(ret_value) } /* H5VLconnector_info_to_str() */ +/*------------------------------------------------------------------------- + * Function: H5VL__connector_str_to_info + * + * Purpose: Deserializes a string into a connector's info object + * + * Return: Success: Non-negative + * Failure: Negative + * + *------------------------------------------------------------------------- + */ +herr_t +H5VL__connector_str_to_info(const char *str, H5VL_connector_t *connector, void **info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Only deserialize string, if it's non-NULL */ + if (str) { + /* Allow the connector to deserialize info */ + if (connector->cls->info_cls.from_str) { + if ((connector->cls->info_cls.from_str)(str, info) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTUNSERIALIZE, FAIL, "can't deserialize connector info"); + } /* end if */ + else + *info = NULL; + } /* end if */ + else + *info = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5VL__connector_str_to_info() */ + /*--------------------------------------------------------------------------- * Function: H5VLconnector_str_to_info * @@ -652,12 +692,17 @@ H5VLconnector_info_to_str(const void *info, hid_t connector_id, char **str) herr_t H5VLconnector_str_to_info(const char *str, hid_t connector_id, void **info /*out*/) { - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector = NULL; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT + /* Check args and get VOL connector */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); + /* Call internal routine */ - if (H5VL__connector_str_to_info(str, connector_id, info) < 0) + if (H5VL__connector_str_to_info(str, connector, info) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTDECODE, FAIL, "can't deserialize connector info"); done: @@ -677,20 +722,20 @@ H5VLconnector_str_to_info(const char *str, hid_t connector_id, void **info /*out void * H5VLget_object(void *obj, hid_t connector_id) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT /* 
Check args */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Check for 'get_object' callback in connector */ - if (cls->wrap_cls.get_object) - ret_value = (cls->wrap_cls.get_object)(obj); + if (connector->cls->wrap_cls.get_object) + ret_value = (connector->cls->wrap_cls.get_object)(obj); else ret_value = obj; @@ -698,44 +743,6 @@ H5VLget_object(void *obj, hid_t connector_id) FUNC_LEAVE_API_NOINIT(ret_value) } /* H5VLget_object */ -/*------------------------------------------------------------------------- - * Function: H5VL_get_wrap_ctx - * - * Purpose: Retrieve the VOL object wrapping context for a connector - * - * Return: Success: Non-negative - * Failure: Negative - * - *------------------------------------------------------------------------- - */ -herr_t -H5VL_get_wrap_ctx(const H5VL_class_t *connector, void *obj, void **wrap_ctx) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - assert(connector); - assert(obj); - assert(wrap_ctx); - - /* Allow the connector to copy or do it ourselves */ - if (connector->wrap_cls.get_wrap_ctx) { - /* Sanity check */ - assert(connector->wrap_cls.free_wrap_ctx); - - /* Invoke connector's callback */ - if ((connector->wrap_cls.get_wrap_ctx)(obj, wrap_ctx) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "connector wrap context callback failed"); - } /* end if */ - else - *wrap_ctx = NULL; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL_get_wrap_ctx() */ - /*--------------------------------------------------------------------------- * Function: H5VLget_wrap_ctx * @@ -752,18 +759,26 @@ H5VL_get_wrap_ctx(const H5VL_class_t *connector, void *obj, void **wrap_ctx) herr_t H5VLget_wrap_ctx(void *obj, hid_t connector_id, void **wrap_ctx /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Check args and get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); - /* Get the VOL connector's object wrapper */ - if (H5VL_get_wrap_ctx(cls, obj, wrap_ctx) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "unable to retrieve VOL connector object wrap context"); + /* Allow the connector to wrap */ + if (connector->cls->wrap_cls.get_wrap_ctx) { + /* Sanity check */ + assert(connector->cls->wrap_cls.free_wrap_ctx); + + /* Invoke cls's callback */ + if ((connector->cls->wrap_cls.get_wrap_ctx)(obj, wrap_ctx) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "connector wrap context callback failed"); + } /* end if */ + else + *wrap_ctx = NULL; done: FUNC_LEAVE_API_NOINIT(ret_value) @@ -780,20 +795,20 @@ H5VLget_wrap_ctx(void *obj, hid_t connector_id, void **wrap_ctx /*out*/) *------------------------------------------------------------------------- */ void * -H5VL_wrap_object(const H5VL_class_t *connector, void *wrap_ctx, void *obj, H5I_type_t obj_type) +H5VL_wrap_object(const H5VL_class_t *cls, void *wrap_ctx, void *obj, H5I_type_t obj_type) { void *ret_value = 
NULL; /* Return value */ FUNC_ENTER_NOAPI(NULL) /* Sanity checks */ - assert(connector); + assert(cls); assert(obj); /* Only wrap object if there's a wrap context */ if (wrap_ctx) { /* Ask the connector to wrap the object */ - if (NULL == (ret_value = (connector->wrap_cls.wrap_object)(obj, obj_type, wrap_ctx))) + if (NULL == (ret_value = (cls->wrap_cls.wrap_object)(obj, obj_type, wrap_ctx))) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, NULL, "can't wrap object"); } /* end if */ else @@ -816,19 +831,19 @@ H5VL_wrap_object(const H5VL_class_t *connector, void *wrap_ctx, void *obj, H5I_t void * H5VLwrap_object(void *obj, H5I_type_t obj_type, hid_t connector_id, void *wrap_ctx) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Wrap the object */ - if (NULL == (ret_value = H5VL_wrap_object(cls, wrap_ctx, obj, obj_type))) + if (NULL == (ret_value = H5VL_wrap_object(connector->cls, wrap_ctx, obj, obj_type))) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, NULL, "unable to wrap object"); done: @@ -846,20 +861,20 @@ H5VLwrap_object(void *obj, H5I_type_t obj_type, hid_t connector_id, void *wrap_c *------------------------------------------------------------------------- */ void * -H5VL_unwrap_object(const H5VL_class_t *connector, void *obj) +H5VL_unwrap_object(const H5VL_class_t *cls, void *obj) { void *ret_value = NULL; /* Return value */ FUNC_ENTER_NOAPI(NULL) /* Sanity checks */ - assert(connector); + assert(cls); assert(obj); /* Only unwrap object if there's an unwrap callback */ - if (connector->wrap_cls.wrap_object) { + if (cls->wrap_cls.wrap_object) { /* Ask the connector to unwrap the object */ - if (NULL == (ret_value = (connector->wrap_cls.unwrap_object)(obj))) + if (NULL == (ret_value = (cls->wrap_cls.unwrap_object)(obj))) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, NULL, "can't unwrap object"); } /* end if */ else @@ -882,56 +897,25 @@ H5VL_unwrap_object(const H5VL_class_t *connector, void *obj) void * H5VLunwrap_object(void *obj, hid_t connector_id) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Unwrap the object */ - if (NULL == (ret_value = H5VL_unwrap_object(cls, obj))) + if (NULL == (ret_value = H5VL_unwrap_object(connector->cls, obj))) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, NULL, "unable to unwrap object"); done: FUNC_LEAVE_API_NOINIT(ret_value) } /* H5VLunwrap_object */ -/*------------------------------------------------------------------------- - * Function: H5VL_free_wrap_ctx - * - * Purpose: Free object wrapping context for 
a connector - * - * Return: Success: Non-negative - * Failure: Negative - * - *------------------------------------------------------------------------- - */ -herr_t -H5VL_free_wrap_ctx(const H5VL_class_t *connector, void *wrap_ctx) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Sanity checks */ - assert(connector); - - /* Only free wrap context, if it's non-NULL */ - if (wrap_ctx) { - /* Free the connector's object wrapping context */ - if ((connector->wrap_cls.free_wrap_ctx)(wrap_ctx) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "connector wrap context free request failed"); - } /* end if */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL_free_wrap_ctx() */ - /*--------------------------------------------------------------------------- * Function: H5VLfree_wrap_ctx * @@ -945,18 +929,20 @@ H5VL_free_wrap_ctx(const H5VL_class_t *connector, void *wrap_ctx) herr_t H5VLfree_wrap_ctx(void *wrap_ctx, hid_t connector_id) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Check args and get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); - /* Release the VOL connector's object wrapper */ - if (H5VL_free_wrap_ctx(cls, wrap_ctx) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "unable to release VOL connector object wrap context"); + /* Only free wrap context, if it's non-NULL */ + if (wrap_ctx) + /* Free the connector's object wrapping context */ + if ((connector->cls->wrap_cls.free_wrap_ctx)(wrap_ctx) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "connector wrap context free request failed"); done: FUNC_LEAVE_API_NOINIT(ret_value) @@ -1045,20 +1031,20 @@ H5VLattr_create(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_ hid_t type_id, hid_t space_id, hid_t acpl_id, hid_t aapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__attr_create(obj, loc_params, cls, name, type_id, space_id, acpl_id, - aapl_id, dxpl_id, req))) + if (NULL == (ret_value = H5VL__attr_create(obj, loc_params, connector->cls, name, type_id, space_id, + acpl_id, aapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, NULL, "unable to create attribute"); done: @@ -1146,19 +1132,19 @@ void * H5VLattr_open(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, const char *name, hid_t aapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL 
connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__attr_open(obj, loc_params, cls, name, aapl_id, dxpl_id, req))) + if (NULL == (ret_value = H5VL__attr_open(obj, loc_params, connector->cls, name, aapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTOPENOBJ, NULL, "unable to open attribute"); done: @@ -1242,19 +1228,19 @@ H5VL_attr_read(const H5VL_object_t *vol_obj, hid_t mem_type_id, void *buf, hid_t herr_t H5VLattr_read(void *obj, hid_t connector_id, hid_t mem_type_id, void *buf, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__attr_read(obj, cls, mem_type_id, buf, dxpl_id, req) < 0) + if (H5VL__attr_read(obj, connector->cls, mem_type_id, buf, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_READERROR, FAIL, "unable to read attribute"); done: @@ -1340,19 +1326,19 @@ herr_t H5VLattr_write(void *obj, hid_t connector_id, hid_t mem_type_id, const void *buf, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__attr_write(obj, cls, mem_type_id, buf, dxpl_id, req) < 0) + if (H5VL__attr_write(obj, connector->cls, mem_type_id, buf, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_WRITEERROR, FAIL, "unable to write attribute"); done: @@ -1436,21 +1422,21 @@ H5VL_attr_get(const H5VL_object_t *vol_obj, H5VL_attr_get_args_t *args, hid_t dx herr_t H5VLattr_get(void *obj, hid_t connector_id, H5VL_attr_get_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); 
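/*
 * Illustrative sketch (not part of this patch): forwarding helpers in the
 * shape a pass-through VOL connector typically uses for attribute I/O,
 * calling the public H5VLattr_read()/H5VLattr_write() wrappers whose
 * argument checking is reworked above.  `my_attr_t` and its fields are
 * hypothetical; reference counting on `under_vol_id` is elided.
 */
#include "hdf5.h"

typedef struct my_attr_t {
    void *under_object; /* Attribute object of the underlying connector */
    hid_t under_vol_id; /* ID of the underlying VOL connector */
} my_attr_t;

static herr_t
my_attr_read(void *attr, hid_t mem_type_id, void *buf, hid_t dxpl_id, void **req)
{
    my_attr_t *a = (my_attr_t *)attr;

    /* Forward the read to the connector below */
    return H5VLattr_read(a->under_object, a->under_vol_id, mem_type_id, buf, dxpl_id, req);
}

static herr_t
my_attr_write(void *attr, hid_t mem_type_id, const void *buf, hid_t dxpl_id, void **req)
{
    my_attr_t *a = (my_attr_t *)attr;

    /* Forward the write to the connector below */
    return H5VLattr_write(a->under_object, a->under_vol_id, mem_type_id, buf, dxpl_id, req);
}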
- if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); if (NULL == args) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid argument struct"); /* Call the corresponding internal VOL routine */ - if (H5VL__attr_get(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__attr_get(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "unable to get attribute information"); done: @@ -1540,20 +1526,20 @@ herr_t H5VLattr_specific(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, H5VL_attr_specific_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ /* (Must return value from callback, for iterators) */ - if ((ret_value = H5VL__attr_specific(obj, loc_params, cls, args, dxpl_id, req)) < 0) + if ((ret_value = H5VL__attr_specific(obj, loc_params, connector->cls, args, dxpl_id, req)) < 0) HERROR(H5E_VOL, H5E_CANTOPERATE, "unable to execute attribute 'specific' callback"); done: @@ -1640,20 +1626,20 @@ herr_t H5VLattr_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ /* (Must return value from callback, for iterators) */ - if ((ret_value = H5VL__attr_optional(obj, cls, args, dxpl_id, req)) < 0) + if ((ret_value = H5VL__attr_optional(obj, connector->cls, args, dxpl_id, req)) < 0) HERROR(H5E_VOL, H5E_CANTOPERATE, "unable to execute attribute optional callback"); done: @@ -1782,19 +1768,19 @@ H5VL_attr_close(const H5VL_object_t *vol_obj, hid_t dxpl_id, void **req) herr_t H5VLattr_close(void *obj, hid_t connector_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = 
H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__attr_close(obj, cls, dxpl_id, req) < 0) + if (H5VL__attr_close(obj, connector->cls, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCLOSEOBJ, FAIL, "unable to close attribute"); done: @@ -1887,20 +1873,20 @@ H5VLdataset_create(void *obj, const H5VL_loc_params_t *loc_params, hid_t connect hid_t lcpl_id, hid_t type_id, hid_t space_id, hid_t dcpl_id, hid_t dapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__dataset_create(obj, loc_params, cls, name, lcpl_id, type_id, space_id, - dcpl_id, dapl_id, dxpl_id, req))) + if (NULL == (ret_value = H5VL__dataset_create(obj, loc_params, connector->cls, name, lcpl_id, type_id, + space_id, dcpl_id, dapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, NULL, "unable to create dataset"); done: @@ -1988,19 +1974,20 @@ void * H5VLdataset_open(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, const char *name, hid_t dapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__dataset_open(obj, loc_params, cls, name, dapl_id, dxpl_id, req))) + if (NULL == + (ret_value = H5VL__dataset_open(obj, loc_params, connector->cls, name, dapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTOPENOBJ, NULL, "unable to open dataset"); done: @@ -2042,7 +2029,7 @@ H5VL__dataset_read(size_t count, void *obj[], const H5VL_class_t *cls, hid_t mem * * Purpose: Reads data from dataset through the VOL. This is like * H5VL_dataset_read, but takes an array of void * for the - * objects and a class pointer instead of an array of + * objects and a connector pointer instead of an array of * H5VL_object_t. This allows us to avoid allocating and * copying an extra array (of H5VL_object_ts). 
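/*
 * Illustrative sketch (not part of this patch): a pass-through style dataset
 * open that forwards to the public H5VLdataset_open() wrapper updated above
 * and wraps the returned object.  `my_obj_t` and its fields are hypothetical;
 * reference counting on `under_vol_id` is elided.
 */
#include <stdlib.h>
#include "hdf5.h"

typedef struct my_obj_t {
    void *under_object; /* Object of the underlying connector */
    hid_t under_vol_id; /* ID of the underlying VOL connector */
} my_obj_t;

static void *
my_dataset_open(void *obj, const H5VL_loc_params_t *loc_params, const char *name,
                hid_t dapl_id, hid_t dxpl_id, void **req)
{
    my_obj_t *parent = (my_obj_t *)obj;
    my_obj_t *dset   = NULL;
    void     *under  = NULL;

    /* Open the dataset with the connector below */
    if (NULL == (under = H5VLdataset_open(parent->under_object, loc_params, parent->under_vol_id,
                                          name, dapl_id, dxpl_id, req)))
        return NULL;

    /* Wrap the underlying object */
    if (NULL == (dset = (my_obj_t *)malloc(sizeof(my_obj_t)))) {
        H5VLdataset_close(under, parent->under_vol_id, dxpl_id, req);
        return NULL;
    }
    dset->under_object = under;
    dset->under_vol_id = parent->under_vol_id;

    return dset;
}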
* @@ -2052,8 +2039,8 @@ H5VL__dataset_read(size_t count, void *obj[], const H5VL_class_t *cls, hid_t mem *------------------------------------------------------------------------- */ herr_t -H5VL_dataset_read(size_t count, void *obj[], H5VL_t *connector, hid_t mem_type_id[], hid_t mem_space_id[], - hid_t file_space_id[], hid_t dxpl_id, void *buf[], void **req) +H5VL_dataset_read(size_t count, void *obj[], H5VL_connector_t *connector, hid_t mem_type_id[], + hid_t mem_space_id[], hid_t file_space_id[], hid_t dxpl_id, void *buf[], void **req) { bool vol_wrapper_set = false; /* Whether the VOL object wrapping context was set up */ H5VL_object_t tmp_vol_obj; /* Temporary VOL object for setting VOL wrapper */ @@ -2099,13 +2086,13 @@ herr_t H5VLdataset_read(size_t count, void *obj[], hid_t connector_id, hid_t mem_type_id[], hid_t mem_space_id[], hid_t file_space_id[], hid_t dxpl_id, void *buf[], void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - size_t i; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + size_t i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "obj array not provided"); for (i = 1; i < count; i++) @@ -2119,11 +2106,12 @@ H5VLdataset_read(size_t count, void *obj[], hid_t connector_id, hid_t mem_type_i HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file_space_id array not provided"); if (NULL == buf) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "buf array not provided"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__dataset_read(count, obj, cls, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, req) < 0) + if (H5VL__dataset_read(count, obj, connector->cls, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, + req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, FAIL, "unable to read dataset"); done: @@ -2165,7 +2153,7 @@ H5VL__dataset_write(size_t count, void *obj[], const H5VL_class_t *cls, hid_t me * * Purpose: Writes data from dataset through the VOL. This is like * H5VL_dataset_write, but takes an array of void * for the - * objects and a class pointer instead of an array of + * objects and a connector pointer instead of an array of * H5VL_object_t. This allows us to avoid allocating and * copying an extra array (of H5VL_object_ts). 
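/*
 * Illustrative sketch (not part of this patch): a pass-through style
 * multi-dataset read forwarding to the array-based H5VLdataset_read() shown
 * in these hunks.  `my_obj_t` is the same hypothetical wrapper struct as in
 * the sketch above; a small fixed-size array stands in for the dynamic
 * allocation a real connector would use.
 */
#define MY_MAX_COUNT 16

static herr_t
my_dataset_read(size_t count, void *dset[], hid_t mem_type_id[], hid_t mem_space_id[],
                hid_t file_space_id[], hid_t dxpl_id, void *buf[], void **req)
{
    void  *under[MY_MAX_COUNT];
    size_t u;

    if (0 == count || count > MY_MAX_COUNT)
        return -1;

    /* All datasets in a single call are assumed to share one underlying connector */
    for (u = 0; u < count; u++)
        under[u] = ((my_obj_t *)dset[u])->under_object;

    return H5VLdataset_read(count, under, ((my_obj_t *)dset[0])->under_vol_id, mem_type_id,
                            mem_space_id, file_space_id, dxpl_id, buf, req);
}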
* @@ -2175,8 +2163,8 @@ H5VL__dataset_write(size_t count, void *obj[], const H5VL_class_t *cls, hid_t me *------------------------------------------------------------------------- */ herr_t -H5VL_dataset_write(size_t count, void *obj[], H5VL_t *connector, hid_t mem_type_id[], hid_t mem_space_id[], - hid_t file_space_id[], hid_t dxpl_id, const void *buf[], void **req) +H5VL_dataset_write(size_t count, void *obj[], H5VL_connector_t *connector, hid_t mem_type_id[], + hid_t mem_space_id[], hid_t file_space_id[], hid_t dxpl_id, const void *buf[], void **req) { bool vol_wrapper_set = false; /* Whether the VOL object wrapping context was set up */ H5VL_object_t tmp_vol_obj; /* Temporary VOL object for setting VOL wrapper */ @@ -2222,13 +2210,13 @@ herr_t H5VLdataset_write(size_t count, void *obj[], hid_t connector_id, hid_t mem_type_id[], hid_t mem_space_id[], hid_t file_space_id[], hid_t dxpl_id, const void *buf[], void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - size_t i; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + size_t i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "obj array not provided"); for (i = 1; i < count; i++) @@ -2242,11 +2230,12 @@ H5VLdataset_write(size_t count, void *obj[], hid_t connector_id, hid_t mem_type_ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file_space_id array not provided"); if (NULL == buf) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "buf array not provided"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__dataset_write(count, obj, cls, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, req) < 0) + if (H5VL__dataset_write(count, obj, connector->cls, mem_type_id, mem_space_id, file_space_id, dxpl_id, + buf, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, FAIL, "unable to write dataset"); done: @@ -2332,19 +2321,19 @@ herr_t H5VLdataset_get(void *obj, hid_t connector_id, H5VL_dataset_get_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__dataset_get(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__dataset_get(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "unable to execute dataset get callback"); done: @@ -2431,19 +2420,19 @@ herr_t H5VLdataset_specific(void *obj, hid_t connector_id, H5VL_dataset_specific_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = 
SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__dataset_specific(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__dataset_specific(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute dataset specific callback"); done: @@ -2529,19 +2518,19 @@ herr_t H5VLdataset_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__dataset_optional(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__dataset_optional(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute dataset optional callback"); done: @@ -2677,19 +2666,19 @@ H5VL_dataset_close(const H5VL_object_t *vol_obj, hid_t dxpl_id, void **req) herr_t H5VLdataset_close(void *obj, hid_t connector_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__dataset_close(obj, cls, dxpl_id, req) < 0) + if (H5VL__dataset_close(obj, connector->cls, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCLOSEOBJ, FAIL, "unable to close dataset"); done: @@ -2780,20 +2769,20 @@ H5VLdatatype_commit(void *obj, const H5VL_loc_params_t *loc_params, hid_t connec hid_t type_id, hid_t lcpl_id, hid_t tcpl_id, hid_t tapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t 
*)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__datatype_commit(obj, loc_params, cls, name, type_id, lcpl_id, tcpl_id, - tapl_id, dxpl_id, req))) + if (NULL == (ret_value = H5VL__datatype_commit(obj, loc_params, connector->cls, name, type_id, lcpl_id, + tcpl_id, tapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, NULL, "unable to commit datatype"); done: @@ -2881,19 +2870,20 @@ void * H5VLdatatype_open(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, const char *name, hid_t tapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__datatype_open(obj, loc_params, cls, name, tapl_id, dxpl_id, req))) + if (NULL == + (ret_value = H5VL__datatype_open(obj, loc_params, connector->cls, name, tapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTOPENOBJ, NULL, "unable to open datatype"); done: @@ -2979,19 +2969,19 @@ herr_t H5VLdatatype_get(void *obj, hid_t connector_id, H5VL_datatype_get_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__datatype_get(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__datatype_get(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "unable to execute datatype get callback"); done: @@ -3078,19 +3068,19 @@ herr_t H5VLdatatype_specific(void *obj, hid_t connector_id, H5VL_datatype_specific_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL 
routine */ - if (H5VL__datatype_specific(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__datatype_specific(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute datatype specific callback"); done: @@ -3220,19 +3210,19 @@ herr_t H5VLdatatype_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__datatype_optional(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__datatype_optional(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute datatype optional callback"); done: @@ -3262,7 +3252,7 @@ H5VLdatatype_optional_op(const char *app_file, const char *app_func, unsigned ap FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (dt = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE))) + if (NULL == (dt = H5I_object_verify(type_id, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype"); /* Set up request token pointer for asynchronous operation */ @@ -3362,19 +3352,19 @@ H5VL_datatype_close(const H5VL_object_t *vol_obj, hid_t dxpl_id, void **req) herr_t H5VLdatatype_close(void *obj, hid_t connector_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__datatype_close(obj, cls, dxpl_id, req) < 0) + if (H5VL__datatype_close(obj, connector->cls, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCLOSEOBJ, FAIL, "unable to close datatype"); done: @@ -3428,20 +3418,15 @@ H5VL__file_create(const H5VL_class_t *cls, const char *name, unsigned flags, hid *------------------------------------------------------------------------- */ void * -H5VL_file_create(const H5VL_connector_prop_t *connector_prop, const char *name, unsigned flags, hid_t fcpl_id, +H5VL_file_create(const H5VL_connector_t *connector, const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, hid_t dxpl_id, void **req) { - H5VL_class_t *cls; /* VOL Class structure for callback info */ - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_NOAPI(NULL) - /* Get the connector's class */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_prop->connector_id, H5I_VOL))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, 
"not a VOL connector ID"); - /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__file_create(cls, name, flags, fcpl_id, fapl_id, dxpl_id, req))) + if (NULL == (ret_value = H5VL__file_create(connector->cls, name, flags, fcpl_id, fapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, NULL, "file create failed"); done: @@ -3464,23 +3449,19 @@ H5VLfile_create(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, { H5P_genplist_t *plist; /* Property list pointer */ H5VL_connector_prop_t connector_prop; /* Property for VOL connector ID & info */ - H5VL_class_t *cls; /* VOL connector's class struct */ void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT /* Get the VOL info from the fapl */ - if (NULL == (plist = (H5P_genplist_t *)H5I_object(fapl_id))) + if (NULL == (plist = H5I_object(fapl_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a file access property list"); if (H5P_peek(plist, H5F_ACS_VOL_CONN_NAME, &connector_prop) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get VOL connector info"); - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_prop.connector_id, H5I_VOL))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); - /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__file_create(cls, name, flags, fcpl_id, fapl_id, dxpl_id, req))) + if (NULL == (ret_value = H5VL__file_create(connector_prop.connector->cls, name, flags, fcpl_id, fapl_id, + dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, NULL, "unable to create file"); done: @@ -3535,13 +3516,16 @@ H5VL__file_open(const H5VL_class_t *cls, const char *name, unsigned flags, hid_t *------------------------------------------------------------------------- */ static herr_t -H5VL__file_open_find_connector_cb(H5PL_type_t plugin_type, const void *plugin_info, void *op_data) +H5VL__file_open_find_connector_cb(H5PL_type_t H5_ATTR_UNUSED plugin_type, + const void H5_ATTR_UNUSED *plugin_info, void *op_data) { H5VL_file_open_find_connector_t *udata = (H5VL_file_open_find_connector_t *)op_data; H5VL_file_specific_args_t vol_cb_args; /* Arguments to VOL callback */ - const H5VL_class_t *cls = (const H5VL_class_t *)plugin_info; + H5VL_connector_t *connector = NULL; + const H5VL_class_t *cls = (const H5VL_class_t *)plugin_info; H5P_genplist_t *fapl_plist; H5P_genplist_t *fapl_plist_copy; + herr_t status; bool is_accessible = false; /* Whether file is accessible */ hid_t connector_id = H5I_INVALID_HID; hid_t fapl_id = H5I_INVALID_HID; @@ -3551,27 +3535,21 @@ H5VL__file_open_find_connector_cb(H5PL_type_t plugin_type, const void *plugin_in assert(udata); assert(udata->filename); - assert(udata->connector_prop); assert(cls); assert(plugin_type == H5PL_TYPE_VOL); - /* Silence compiler */ - (void)plugin_type; - - udata->cls = cls; - /* Attempt to register plugin as a VOL connector */ - if ((connector_id = H5VL__register_connector_by_class(cls, true, H5P_VOL_INITIALIZE_DEFAULT)) < 0) + if (NULL == (connector = H5VL__register_connector_by_class(cls, H5P_VOL_INITIALIZE_DEFAULT))) HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5_ITER_ERROR, "unable to register VOL connector"); /* Setup FAPL with registered VOL connector */ - if (NULL == (fapl_plist = (H5P_genplist_t *)H5I_object_verify(udata->fapl_id, H5I_GENPROP_LST))) + if (NULL == (fapl_plist = H5I_object_verify(udata->fapl_id, H5I_GENPROP_LST))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5_ITER_ERROR, "not a property list"); if ((fapl_id = 
H5P_copy_plist(fapl_plist, true)) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTCOPY, H5_ITER_ERROR, "can't copy fapl"); - if (NULL == (fapl_plist_copy = (H5P_genplist_t *)H5I_object_verify(fapl_id, H5I_GENPROP_LST))) + if (NULL == (fapl_plist_copy = H5I_object_verify(fapl_id, H5I_GENPROP_LST))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5_ITER_ERROR, "not a property list"); - if (H5P_set_vol(fapl_plist_copy, connector_id, NULL) < 0) + if (H5P_set_vol(fapl_plist_copy, connector, NULL) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, H5_ITER_ERROR, "can't set VOL connector on fapl"); /* Set up VOL callback arguments */ @@ -3580,32 +3558,28 @@ H5VL__file_open_find_connector_cb(H5PL_type_t plugin_type, const void *plugin_in vol_cb_args.args.is_accessible.fapl_id = fapl_id; vol_cb_args.args.is_accessible.accessible = &is_accessible; - if (H5VL_file_specific(NULL, &vol_cb_args, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, H5_ITER_ERROR, "error when checking for accessible HDF5 file"); + H5E_PAUSE_ERRORS + { + status = H5VL_file_specific(NULL, &vol_cb_args, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL); + } + H5E_RESUME_ERRORS - if (is_accessible) { + if (status >= 0 && is_accessible) { /* If the file was accessible with the current VOL connector, return * the FAPL with that VOL connector set on it. */ - - /* Modify 'connector_prop' to point to the VOL connector that - * was actually used to open the file, rather than the original - * VOL connector that was requested. - */ - udata->connector_prop->connector_id = connector_id; - udata->connector_prop->connector_info = NULL; - udata->fapl_id = fapl_id; + udata->cls = cls; ret_value = H5_ITER_STOP; } done: - if (ret_value != H5_ITER_STOP) { + if (connector && H5I_dec_app_ref(connector_id) < 0) + HDONE_ERROR(H5E_ID, H5E_CANTCLOSEOBJ, H5_ITER_ERROR, "can't close VOL connector ID"); + + if (ret_value != H5_ITER_STOP) if (fapl_id >= 0 && H5I_dec_app_ref(fapl_id) < 0) HDONE_ERROR(H5E_PLIST, H5E_CANTCLOSEOBJ, H5_ITER_ERROR, "can't close fapl"); - if (connector_id >= 0 && H5I_dec_app_ref(connector_id) < 0) - HDONE_ERROR(H5E_ID, H5E_CANTCLOSEOBJ, H5_ITER_ERROR, "can't close VOL connector ID"); - } /* end if */ FUNC_LEAVE_NOAPI(ret_value) } /* end H5VL__file_open_find_connector_cb() */ @@ -3624,20 +3598,15 @@ H5VL__file_open_find_connector_cb(H5PL_type_t plugin_type, const void *plugin_in *------------------------------------------------------------------------- */ void * -H5VL_file_open(H5VL_connector_prop_t *connector_prop, const char *name, unsigned flags, hid_t fapl_id, - hid_t dxpl_id, void **req) +H5VL_file_open(H5VL_connector_t *connector, const char *name, unsigned flags, hid_t fapl_id, hid_t dxpl_id, + void **req) { - H5VL_class_t *cls; /* VOL Class structure for callback info */ - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_NOAPI(NULL) - /* Get the connector's class */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_prop->connector_id, H5I_VOL))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); - /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__file_open(cls, name, flags, fapl_id, dxpl_id, req))) { + if (NULL == (ret_value = H5VL__file_open(connector->cls, name, flags, fapl_id, dxpl_id, req))) { bool is_default_conn = true; /* Opening the file failed - Determine whether we should search @@ -3645,16 +3614,15 @@ H5VL_file_open(H5VL_connector_prop_t *connector_prop, const char *name, unsigned * to attempt to open 
the file with. This only occurs if the default * VOL connector was used for the initial file open attempt. */ - H5VL__is_default_conn(fapl_id, connector_prop->connector_id, &is_default_conn); + H5VL__is_default_conn(fapl_id, connector, &is_default_conn); if (is_default_conn) { H5VL_file_open_find_connector_t find_connector_ud; herr_t iter_ret; - find_connector_ud.connector_prop = connector_prop; - find_connector_ud.filename = name; - find_connector_ud.cls = NULL; - find_connector_ud.fapl_id = fapl_id; + find_connector_ud.filename = name; + find_connector_ud.cls = NULL; + find_connector_ud.fapl_id = fapl_id; iter_ret = H5PL_iterate(H5PL_ITER_TYPE_VOL, H5VL__file_open_find_connector_cb, (void *)&find_connector_ud); @@ -3663,13 +3631,8 @@ H5VL_file_open(H5VL_connector_prop_t *connector_prop, const char *name, unsigned "failed to iterate over available VOL connector plugins"); else if (iter_ret) { /* If one of the available VOL connector plugins is - * able to open the file, clear the error stack from any - * previous file open failures and then open the file. - * Otherwise, if no VOL connectors are available, throw - * error from original file open failure. + * able to open the file, open the file with it. */ - H5E_clear_stack(); - if (NULL == (ret_value = H5VL__file_open(find_connector_ud.cls, name, flags, find_connector_ud.fapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTOPENOBJ, NULL, @@ -3677,6 +3640,9 @@ H5VL_file_open(H5VL_connector_prop_t *connector_prop, const char *name, unsigned find_connector_ud.cls->name); } else + /* Otherwise, if no VOL connectors are available, throw + * error from original file open failure. + */ HGOTO_ERROR(H5E_VOL, H5E_CANTOPENOBJ, NULL, "open failed"); } /* end if */ else @@ -3702,23 +3668,19 @@ H5VLfile_open(const char *name, unsigned flags, hid_t fapl_id, hid_t dxpl_id, vo { H5P_genplist_t *plist; /* Property list pointer */ H5VL_connector_prop_t connector_prop; /* Property for VOL connector ID & info */ - H5VL_class_t *cls; /* VOL connector's class struct */ void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT /* Get the VOL info from the fapl */ - if (NULL == (plist = (H5P_genplist_t *)H5I_object(fapl_id))) + if (NULL == (plist = H5I_object(fapl_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a file access property list"); if (H5P_peek(plist, H5F_ACS_VOL_CONN_NAME, &connector_prop) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get VOL connector info"); - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_prop.connector_id, H5I_VOL))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); - /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__file_open(cls, name, flags, fapl_id, dxpl_id, req))) + if (NULL == + (ret_value = H5VL__file_open(connector_prop.connector->cls, name, flags, fapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTOPENOBJ, NULL, "unable to open file"); done: @@ -3802,19 +3764,19 @@ H5VL_file_get(const H5VL_object_t *vol_obj, H5VL_file_get_args_t *args, hid_t dx herr_t H5VLfile_get(void *obj, hid_t connector_id, H5VL_file_get_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, 
H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__file_get(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__file_get(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "unable to execute file get callback"); done: @@ -3886,14 +3848,13 @@ H5VL_file_specific(const H5VL_object_t *vol_obj, H5VL_file_specific_args_t *args } /* Get the VOL info from the FAPL */ - if (NULL == (plist = (H5P_genplist_t *)H5I_object(fapl_id))) + if (NULL == (plist = H5I_object(fapl_id))) HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, FAIL, "not a file access property list"); if (H5P_peek(plist, H5F_ACS_VOL_CONN_NAME, &connector_prop) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't get VOL connector info"); /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_prop.connector_id, H5I_VOL))) - HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, FAIL, "not a VOL connector ID"); + cls = connector_prop.connector->cls; } /* end if */ /* Set wrapper info in API context, for all other operations */ else { @@ -3936,17 +3897,17 @@ herr_t H5VLfile_specific(void *obj, hid_t connector_id, H5VL_file_specific_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Check args and get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__file_specific(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__file_specific(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute file specific callback"); done: @@ -4031,19 +3992,19 @@ herr_t H5VLfile_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__file_optional(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__file_optional(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute file optional callback"); done: @@ -4173,19 +4134,19 @@ H5VL_file_close(const H5VL_object_t *vol_obj, hid_t dxpl_id, void **req) herr_t H5VLfile_close(void *obj, hid_t connector_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = 
SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__file_close(obj, cls, dxpl_id, req) < 0) + if (H5VL__file_close(obj, connector->cls, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCLOSEFILE, FAIL, "unable to close file"); done: @@ -4274,20 +4235,20 @@ void * H5VLgroup_create(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, const char *name, hid_t lcpl_id, hid_t gcpl_id, hid_t gapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (NULL == - (ret_value = H5VL__group_create(obj, loc_params, cls, name, lcpl_id, gcpl_id, gapl_id, dxpl_id, req))) + if (NULL == (ret_value = H5VL__group_create(obj, loc_params, connector->cls, name, lcpl_id, gcpl_id, + gapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, NULL, "unable to create group"); done: @@ -4375,19 +4336,19 @@ void * H5VLgroup_open(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, const char *name, hid_t gapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__group_open(obj, loc_params, cls, name, gapl_id, dxpl_id, req))) + if (NULL == (ret_value = H5VL__group_open(obj, loc_params, connector->cls, name, gapl_id, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, NULL, "unable to open group"); done: @@ -4471,19 +4432,19 @@ H5VL_group_get(const H5VL_object_t *vol_obj, H5VL_group_get_args_t *args, hid_t herr_t H5VLgroup_get(void *obj, hid_t connector_id, H5VL_group_get_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get 
class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__group_get(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__group_get(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "unable to execute group get callback"); done: @@ -4569,19 +4530,19 @@ herr_t H5VLgroup_specific(void *obj, hid_t connector_id, H5VL_group_specific_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__group_specific(obj, cls, args, dxpl_id, req) < 0) + if (H5VL__group_specific(obj, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute group specific callback"); done: @@ -4669,20 +4630,20 @@ herr_t H5VLgroup_optional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ /* (Must return value from callback, for iterators) */ - if ((ret_value = H5VL__group_optional(obj, cls, args, dxpl_id, req)) < 0) + if ((ret_value = H5VL__group_optional(obj, connector->cls, args, dxpl_id, req)) < 0) HERROR(H5E_VOL, H5E_CANTOPERATE, "unable to execute group optional callback"); done: @@ -4812,19 +4773,19 @@ H5VL_group_close(const H5VL_object_t *vol_obj, hid_t dxpl_id, void **req) herr_t H5VLgroup_close(void *obj, hid_t connector_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL 
connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__group_close(obj, cls, dxpl_id, req) < 0) + if (H5VL__group_close(obj, connector->cls, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCLOSEOBJ, FAIL, "unable to close group"); done: @@ -4926,17 +4887,17 @@ herr_t H5VLlink_create(H5VL_link_create_args_t *args, void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__link_create(args, obj, loc_params, cls, lcpl_id, lapl_id, dxpl_id, req) < 0) + if (H5VL__link_create(args, obj, loc_params, connector->cls, lcpl_id, lapl_id, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, FAIL, "unable to create link"); done: @@ -5031,17 +4992,18 @@ H5VLlink_copy(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj const H5VL_loc_params_t *loc_params2, hid_t connector_id, hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__link_copy(src_obj, loc_params1, dst_obj, loc_params2, cls, lcpl_id, lapl_id, dxpl_id, req) < 0) + if (H5VL__link_copy(src_obj, loc_params1, dst_obj, loc_params2, connector->cls, lcpl_id, lapl_id, dxpl_id, + req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCOPY, FAIL, "unable to copy object"); done: @@ -5140,17 +5102,18 @@ H5VLlink_move(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj const H5VL_loc_params_t *loc_params2, hid_t connector_id, hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__link_move(src_obj, loc_params1, dst_obj, loc_params2, cls, lcpl_id, lapl_id, dxpl_id, req) < 0) + if (H5VL__link_move(src_obj, loc_params1, dst_obj, loc_params2, connector->cls, lcpl_id, lapl_id, dxpl_id, + req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTMOVE, FAIL, "unable to move object"); done: @@ -5237,19 +5200,19 @@ herr_t H5VLlink_get(void *obj, const H5VL_loc_params_t *loc_params, hid_t 
connector_id, H5VL_link_get_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__link_get(obj, loc_params, cls, args, dxpl_id, req) < 0) + if (H5VL__link_get(obj, loc_params, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "unable to execute link get callback"); done: @@ -5339,20 +5302,20 @@ herr_t H5VLlink_specific(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, H5VL_link_specific_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ /* (Must return value from callback, for iterators) */ - if ((ret_value = H5VL__link_specific(obj, loc_params, cls, args, dxpl_id, req)) < 0) + if ((ret_value = H5VL__link_specific(obj, loc_params, connector->cls, args, dxpl_id, req)) < 0) HERROR(H5E_VOL, H5E_CANTOPERATE, "unable to execute link specific callback"); done: @@ -5439,19 +5402,19 @@ herr_t H5VLlink_optional(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__link_optional(obj, loc_params, cls, args, dxpl_id, req) < 0) + if (H5VL__link_optional(obj, loc_params, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute link optional callback"); done: @@ -5598,19 +5561,19 @@ void * H5VLobject_open(void *obj, const H5VL_loc_params_t *params, hid_t connector_id, H5I_type_t *opened_type, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - void *ret_value = NULL; /* Return value */ + H5VL_connector_t *connector; /* VOL 
connector */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (NULL == (ret_value = H5VL__object_open(obj, params, cls, opened_type, dxpl_id, req))) + if (NULL == (ret_value = H5VL__object_open(obj, params, connector->cls, opened_type, dxpl_id, req))) HGOTO_ERROR(H5E_VOL, H5E_CANTOPENOBJ, NULL, "unable to open object"); done: @@ -5707,20 +5670,20 @@ H5VLobject_copy(void *src_obj, const H5VL_loc_params_t *src_loc_params, const ch const H5VL_loc_params_t *dst_loc_params, const char *dst_name, hid_t connector_id, hid_t ocpypl_id, hid_t lcpl_id, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointers */ + /* Check args and get connector pointers */ if (NULL == src_obj || NULL == dst_obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__object_copy(src_obj, src_loc_params, src_name, dst_obj, dst_loc_params, dst_name, cls, - ocpypl_id, lcpl_id, dxpl_id, req) < 0) + if (H5VL__object_copy(src_obj, src_loc_params, src_name, dst_obj, dst_loc_params, dst_name, + connector->cls, ocpypl_id, lcpl_id, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCOPY, FAIL, "unable to copy object"); done: @@ -5807,19 +5770,19 @@ herr_t H5VLobject_get(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, H5VL_object_get_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__object_get(obj, loc_params, cls, args, dxpl_id, req) < 0) + if (H5VL__object_get(obj, loc_params, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "unable to execute object get callback"); done: @@ -5909,20 +5872,20 @@ herr_t H5VLobject_specific(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, H5VL_object_specific_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return 
value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); - /* Bypass the H5VLint layer, calling the VOL callback directly */ + /* Call the corresponding internal VOL routine */ /* (Must return value from callback, for iterators) */ - if ((ret_value = (cls->object_cls.specific)(obj, loc_params, args, dxpl_id, req)) < 0) + if ((ret_value = H5VL__object_specific(obj, loc_params, connector->cls, args, dxpl_id, req)) < 0) HERROR(H5E_VOL, H5E_CANTOPERATE, "unable to execute object specific callback"); done: @@ -6009,19 +5972,19 @@ herr_t H5VLobject_optional(void *obj, const H5VL_loc_params_t *loc_params, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__object_optional(obj, loc_params, cls, args, dxpl_id, req) < 0) + if (H5VL__object_optional(obj, loc_params, connector->cls, args, dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute object optional callback"); done: @@ -6177,8 +6140,8 @@ herr_t H5VLintrospect_get_conn_cls(void *obj, hid_t connector_id, H5VL_get_conn_lvl_t lvl, const H5VL_class_t **conn_cls /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT @@ -6188,12 +6151,12 @@ H5VLintrospect_get_conn_cls(void *obj, hid_t connector_id, H5VL_get_conn_lvl_t l if (NULL == conn_cls) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL conn_cls pointer"); - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__introspect_get_conn_cls(obj, cls, lvl, conn_cls) < 0) + if (H5VL__introspect_get_conn_cls(obj, connector->cls, lvl, conn_cls) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't query connector class"); done: @@ -6248,8 +6211,8 @@ H5VL_introspect_get_cap_flags(const void *info, const H5VL_class_t *cls, uint64_ herr_t H5VLintrospect_get_cap_flags(const void *info, hid_t connector_id, uint64_t *cap_flags /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT @@ -6257,12 +6220,12 @@ 
H5VLintrospect_get_cap_flags(const void *info, hid_t connector_id, uint64_t *cap if (NULL == cap_flags) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL conn_cls pointer"); - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL_introspect_get_cap_flags(info, cls, cap_flags) < 0) + if (H5VL_introspect_get_cap_flags(info, connector->cls, cap_flags) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't query connector's capability flags"); done: @@ -6351,17 +6314,17 @@ herr_t H5VLintrospect_opt_query(void *obj, hid_t connector_id, H5VL_subclass_t subcls, int opt_type, uint64_t *flags /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__introspect_opt_query(obj, cls, subcls, opt_type, flags) < 0) + if (H5VL__introspect_opt_query(obj, connector->cls, subcls, opt_type, flags) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't query optional operation support"); done: @@ -6453,17 +6416,17 @@ H5VL_request_wait(const H5VL_object_t *vol_obj, uint64_t timeout, H5VL_request_s herr_t H5VLrequest_wait(void *req, hid_t connector_id, uint64_t timeout, H5VL_request_status_t *status /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__request_wait(req, cls, timeout, status) < 0) + if (H5VL__request_wait(req, connector->cls, timeout, status) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "unable to wait on request"); done: @@ -6557,17 +6520,17 @@ H5VL_request_notify(const H5VL_object_t *vol_obj, H5VL_request_notify_t cb, void herr_t H5VLrequest_notify(void *req, hid_t connector_id, H5VL_request_notify_t cb, void *ctx) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__request_notify(req, cls, cb, ctx) < 0) + if (H5VL__request_notify(req, connector->cls, cb, ctx) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTSET, FAIL, 
"unable to register notify callback for request"); done: @@ -6658,17 +6621,17 @@ H5VL_request_cancel(const H5VL_object_t *vol_obj, H5VL_request_status_t *status) herr_t H5VLrequest_cancel(void *req, hid_t connector_id, H5VL_request_status_t *status /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__request_cancel(req, cls, status) < 0) + if (H5VL__request_cancel(req, connector->cls, status) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "unable to cancel request"); done: @@ -6761,17 +6724,17 @@ H5VL_request_specific(const H5VL_object_t *vol_obj, H5VL_request_specific_args_t herr_t H5VLrequest_specific(void *req, hid_t connector_id, H5VL_request_specific_args_t *args) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__request_specific(req, cls, args) < 0) + if (H5VL__request_specific(req, connector->cls, args) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute asynchronous request specific callback"); @@ -6865,17 +6828,17 @@ H5VL_request_optional(const H5VL_object_t *vol_obj, H5VL_optional_args_t *args) herr_t H5VLrequest_optional(void *req, hid_t connector_id, H5VL_optional_args_t *args) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__request_optional(req, cls, args) < 0) + if (H5VL__request_optional(req, connector->cls, args) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute asynchronous request optional callback"); @@ -6896,8 +6859,8 @@ H5VLrequest_optional(void *req, hid_t connector_id, H5VL_optional_args_t *args) herr_t H5VLrequest_optional_op(void *req, hid_t connector_id, H5VL_optional_args_t *args) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -6907,12 +6870,12 @@ H5VLrequest_optional_op(void *req, hid_t connector_id, H5VL_optional_args_t *arg if (NULL == args) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid arguments"); - /* Get class 
pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__request_optional(req, cls, args) < 0) + if (H5VL__request_optional(req, connector->cls, args) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "unable to execute request optional callback"); done: @@ -7003,17 +6966,17 @@ H5VL_request_free(const H5VL_object_t *vol_obj) herr_t H5VLrequest_free(void *req, hid_t connector_id) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + /* Get connector pointer */ + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if (H5VL__request_free(req, cls) < 0) + if (H5VL__request_free(req, connector->cls) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "unable to free request"); done: @@ -7095,19 +7058,19 @@ H5VL_blob_put(const H5VL_object_t *vol_obj, const void *buf, size_t size, void * herr_t H5VLblob_put(void *obj, hid_t connector_id, const void *buf, size_t size, void *blob_id, void *ctx) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ + /* Get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding VOL callback */ - if (H5VL__blob_put(obj, cls, buf, size, blob_id, ctx) < 0) + if (H5VL__blob_put(obj, connector->cls, buf, size, blob_id, ctx) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTSET, FAIL, "blob put failed"); done: @@ -7189,19 +7152,19 @@ H5VL_blob_get(const H5VL_object_t *vol_obj, const void *blob_id, void *buf, size herr_t H5VLblob_get(void *obj, hid_t connector_id, const void *blob_id, void *buf /*out*/, size_t size, void *ctx) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ + /* Get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding VOL callback */ - if (H5VL__blob_get(obj, cls, blob_id, buf, size, ctx) < 0) + if (H5VL__blob_get(obj, connector->cls, blob_id, buf, size, ctx) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "blob get failed"); done: @@ -7282,19 +7245,19 @@ H5VL_blob_specific(const H5VL_object_t *vol_obj, void *blob_id, H5VL_blob_specif herr_t 
H5VLblob_specific(void *obj, hid_t connector_id, void *blob_id, H5VL_blob_specific_args_t *args) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ + /* Get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding VOL callback */ - if (H5VL__blob_specific(obj, cls, blob_id, args) < 0) + if (H5VL__blob_specific(obj, connector->cls, blob_id, args) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "blob specific operation failed"); done: @@ -7375,19 +7338,19 @@ H5VL_blob_optional(const H5VL_object_t *vol_obj, void *blob_id, H5VL_optional_ar herr_t H5VLblob_optional(void *obj, hid_t connector_id, void *blob_id, H5VL_optional_args_t *args) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Get class pointer */ + /* Get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding VOL callback */ - if (H5VL__blob_optional(obj, cls, blob_id, args) < 0) + if (H5VL__blob_optional(obj, connector->cls, blob_id, args) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTOPERATE, FAIL, "blob optional operation failed"); done: @@ -7496,21 +7459,21 @@ herr_t H5VLtoken_cmp(void *obj, hid_t connector_id, const H5O_token_t *token1, const H5O_token_t *token2, int *cmp_value) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); if (NULL == cmp_value) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid cmp_value pointer"); /* Call the corresponding internal VOL routine */ - if (H5VL__token_cmp(obj, cls, token1, token2, cmp_value) < 0) + if (H5VL__token_cmp(obj, connector->cls, token1, token2, cmp_value) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "object token comparison failed"); done: @@ -7600,15 +7563,15 @@ herr_t H5VLtoken_to_str(void *obj, H5I_type_t obj_type, hid_t connector_id, const H5O_token_t *token, char **token_str) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if 
(NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); if (NULL == token) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid token pointer"); @@ -7616,7 +7579,7 @@ H5VLtoken_to_str(void *obj, H5I_type_t obj_type, hid_t connector_id, const H5O_t HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid token_str pointer"); /* Call the corresponding internal VOL routine */ - if (H5VL__token_to_str(obj, obj_type, cls, token, token_str) < 0) + if (H5VL__token_to_str(obj, obj_type, connector->cls, token, token_str) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTSERIALIZE, FAIL, "object token to string failed"); done: @@ -7706,15 +7669,15 @@ herr_t H5VLtoken_from_str(void *obj, H5I_type_t obj_type, hid_t connector_id, const char *token_str, H5O_token_t *token) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); if (NULL == token) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid token pointer"); @@ -7722,7 +7685,7 @@ H5VLtoken_from_str(void *obj, H5I_type_t obj_type, hid_t connector_id, const cha HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid token_str pointer"); /* Call the corresponding internal VOL routine */ - if (H5VL__token_from_str(obj, obj_type, cls, token_str, token) < 0) + if (H5VL__token_from_str(obj, obj_type, connector->cls, token_str, token) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTUNSERIALIZE, FAIL, "object token from string failed"); done: @@ -7806,19 +7769,19 @@ H5VL_optional(const H5VL_object_t *vol_obj, H5VL_optional_args_t *args, hid_t dx herr_t H5VLoptional(void *obj, hid_t connector_id, H5VL_optional_args_t *args, hid_t dxpl_id, void **req /*out*/) { - H5VL_class_t *cls; /* VOL connector's class struct */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_connector_t *connector; /* VOL connector */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT - /* Check args and get class pointer */ + /* Check args and get connector pointer */ if (NULL == obj) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid object"); - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) + if (NULL == (connector = H5I_object_verify(connector_id, H5I_VOL))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID"); /* Call the corresponding internal VOL routine */ - if ((ret_value = H5VL__optional(obj, cls, args, dxpl_id, req)) < 0) + if ((ret_value = H5VL__optional(obj, connector->cls, args, dxpl_id, req)) < 0) HERROR(H5E_VOL, H5E_CANTOPERATE, "unable to execute optional callback"); done: diff --git a/src/H5VLconnector.h b/src/H5VLconnector.h index 4bf082397f8..19e5e11999c 100644 --- a/src/H5VLconnector.h +++ b/src/H5VLconnector.h @@ -1105,14 +1105,6 @@ H5_DLL void *H5VLobject(hid_t obj_id); * \ingroup H5VLDEV */ H5_DLL hid_t H5VLget_file_type(void *file_obj, hid_t connector_id, 
hid_t dtype_id); -/** - * \ingroup H5VLDEV - */ -H5_DLL hid_t H5VLpeek_connector_id_by_name(const char *name); -/** - * \ingroup H5VLDEV - */ -H5_DLL hid_t H5VLpeek_connector_id_by_value(H5VL_class_value_t value); /* User-defined optional operations */ H5_DLL herr_t H5VLregister_opt_operation(H5VL_subclass_t subcls, const char *op_name, int *op_val); diff --git a/src/H5VLint.c b/src/H5VLint.c index 19a11e9b6b8..e3e2b28e1a9 100644 --- a/src/H5VLint.c +++ b/src/H5VLint.c @@ -44,8 +44,8 @@ #include "H5VLpkg.h" /* Virtual Object Layer */ /* VOL connectors */ -#include "H5VLnative.h" /* Native VOL connector */ -#include "H5VLpassthru.h" /* Pass-through VOL connector */ +#include "H5VLnative_private.h" /* Native VOL connector */ +#include "H5VLpassthru_private.h" /* Pass-through VOL connector */ /****************/ /* Local Macros */ @@ -66,9 +66,9 @@ * VOL connector, and the ID of the next VOL connector. */ typedef struct H5VL_wrap_ctx_t { - unsigned rc; /* Ref. count for the # of times the context was set / reset */ - H5VL_t *connector; /* VOL connector for "outermost" class to start wrap */ - void *obj_wrap_ctx; /* "wrap context" for outermost connector */ + unsigned rc; /* Ref. count for the # of times the context was set / reset */ + H5VL_connector_t *connector; /* VOL connector for "outermost" class to start wrap */ + void *obj_wrap_ctx; /* "wrap context" for outermost connector */ } H5VL_wrap_ctx_t; /* Information needed for iterating over the registered VOL connector hid_t IDs. @@ -92,12 +92,14 @@ typedef struct { /********************/ /* Local Prototypes */ /********************/ -static herr_t H5VL__free_cls(H5VL_class_t *cls, void **request); -static int H5VL__get_connector_cb(void *obj, hid_t id, void *_op_data); -static void *H5VL__wrap_obj(void *obj, H5I_type_t obj_type); -static H5VL_object_t *H5VL__new_vol_obj(H5I_type_t type, void *object, H5VL_t *vol_connector, bool wrap_obj); -static void *H5VL__object(hid_t id, H5I_type_t obj_type); -static herr_t H5VL__free_vol_wrapper(H5VL_wrap_ctx_t *vol_wrap_ctx); +static herr_t H5VL__free_cls(H5VL_class_t *cls); +static void *H5VL__wrap_obj(void *obj, H5I_type_t obj_type); +static H5VL_connector_t *H5VL__conn_create(H5VL_class_t *cls); +static herr_t H5VL__conn_find(H5PL_vol_key_t *key, H5VL_connector_t **connector); +static herr_t H5VL__conn_free(H5VL_connector_t *connector); +static herr_t H5VL__conn_free_id(H5VL_connector_t *connector, void H5_ATTR_UNUSED **request); +static void *H5VL__object(hid_t id, H5I_type_t obj_type); +static herr_t H5VL__free_vol_wrapper(H5VL_wrap_ctx_t *vol_wrap_ctx); /*********************/ /* Package Variables */ @@ -113,17 +115,17 @@ static herr_t H5VL__free_vol_wrapper(H5VL_wrap_ctx_t *vol_wrap_ctx); /* VOL ID class */ static const H5I_class_t H5I_VOL_CLS[1] = {{ - H5I_VOL, /* ID class value */ - 0, /* Class flags */ - 0, /* # of reserved IDs for class */ - (H5I_free_t)H5VL__free_cls /* Callback routine for closing objects of this class */ + H5I_VOL, /* ID class value */ + 0, /* Class flags */ + 0, /* # of reserved IDs for class */ + (H5I_free_t)H5VL__conn_free_id /* Callback routine for closing objects of this class */ }}; /* Declare a free list to manage the H5VL_class_t struct */ H5FL_DEFINE_STATIC(H5VL_class_t); -/* Declare a free list to manage the H5VL_t struct */ -H5FL_DEFINE(H5VL_t); +/* Declare a free list to manage the H5VL_connector_t struct */ +H5FL_DEFINE_STATIC(H5VL_connector_t); /* Declare a free list to manage the H5VL_object_t struct */ H5FL_DEFINE(H5VL_object_t); @@ -131,8 +133,11 @@ 
H5FL_DEFINE(H5VL_object_t); /* Declare a free list to manage the H5VL_wrap_ctx_t struct */ H5FL_DEFINE_STATIC(H5VL_wrap_ctx_t); +/* List of currently active VOL connectors */ +static H5VL_connector_t *H5VL_conn_list_head_g = NULL; + /* Default VOL connector */ -static H5VL_connector_prop_t H5VL_def_conn_s = {-1, NULL}; +static H5VL_connector_prop_t H5VL_def_conn_s = {NULL, NULL}; /*------------------------------------------------------------------------- * Function: H5VL_init_phase1 @@ -212,8 +217,14 @@ H5VL_init_phase2(void) /* clang-format on */ + /* Register internal VOL connectors */ + if (H5VL__native_register() < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, FAIL, "unable to register native VOL connector"); + if (H5VL__passthru_register() < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, FAIL, "unable to register passthru VOL connector"); + /* Sanity check default VOL connector */ - assert(H5VL_def_conn_s.connector_id == (-1)); + assert(H5VL_def_conn_s.connector == NULL); assert(H5VL_def_conn_s.connector_info == NULL); /* Set up the default VOL connector in the default FAPL */ @@ -242,10 +253,10 @@ H5VL_term_package(void) FUNC_ENTER_NOAPI_NOINIT_NOERR - if (H5VL_def_conn_s.connector_id > 0) { + if (H5VL_def_conn_s.connector) { /* Release the default VOL connector */ - (void)H5VL_conn_free(&H5VL_def_conn_s); - H5VL_def_conn_s.connector_id = -1; + (void)H5VL_conn_prop_free(&H5VL_def_conn_s); + H5VL_def_conn_s.connector = NULL; H5VL_def_conn_s.connector_info = NULL; n++; } /* end if */ @@ -253,6 +264,11 @@ H5VL_term_package(void) if (H5I_nmembers(H5I_VOL) > 0) { /* Unregister all VOL connectors */ (void)H5I_clear_type(H5I_VOL, true, false); + + /* Reset internal VOL connectors' global vars */ + (void)H5VL__native_unregister(); + (void)H5VL__passthru_unregister(); + n++; } /* end if */ else { @@ -284,7 +300,7 @@ H5VL_term_package(void) *------------------------------------------------------------------------- */ static herr_t -H5VL__free_cls(H5VL_class_t *cls, void H5_ATTR_UNUSED **request) +H5VL__free_cls(H5VL_class_t *cls) { herr_t ret_value = SUCCEED; @@ -305,43 +321,6 @@ H5VL__free_cls(H5VL_class_t *cls, void H5_ATTR_UNUSED **request) FUNC_LEAVE_NOAPI(ret_value) } /* end H5VL__free_cls() */ -/*------------------------------------------------------------------------- - * Function: H5VL__get_connector_cb - * - * Purpose: Callback routine to search through registered VOLs - * - * Return: Success: H5_ITER_STOP if the class and op_data name - * members match. H5_ITER_CONT otherwise. 
- * Failure: Can't fail - * - *------------------------------------------------------------------------- - */ -static int -H5VL__get_connector_cb(void *obj, hid_t id, void *_op_data) -{ - H5VL_get_connector_ud_t *op_data = (H5VL_get_connector_ud_t *)_op_data; /* User data for callback */ - H5VL_class_t *cls = (H5VL_class_t *)obj; - int ret_value = H5_ITER_CONT; /* Callback return value */ - - FUNC_ENTER_PACKAGE_NOERR - - if (H5VL_GET_CONNECTOR_BY_NAME == op_data->key.kind) { - if (0 == strcmp(cls->name, op_data->key.u.name)) { - op_data->found_id = id; - ret_value = H5_ITER_STOP; - } /* end if */ - } /* end if */ - else { - assert(H5VL_GET_CONNECTOR_BY_VALUE == op_data->key.kind); - if (cls->value == op_data->key.u.value) { - op_data->found_id = id; - ret_value = H5_ITER_STOP; - } /* end if */ - } /* end else */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL__get_connector_cb() */ - /*------------------------------------------------------------------------- * Function: H5VL__set_def_conn * @@ -359,22 +338,22 @@ H5VL__get_connector_cb(void *obj, hid_t id, void *_op_data) herr_t H5VL__set_def_conn(void) { - H5P_genplist_t *def_fapl; /* Default file access property list */ - H5P_genclass_t *def_fapclass; /* Default file access property class */ - const char *env_var; /* Environment variable for default VOL connector */ - char *buf = NULL; /* Buffer for tokenizing string */ - hid_t connector_id = -1; /* VOL connector ID */ - void *vol_info = NULL; /* VOL connector info */ - herr_t ret_value = SUCCEED; /* Return value */ + H5P_genplist_t *def_fapl; /* Default file access property list */ + H5P_genclass_t *def_fapclass; /* Default file access property class */ + const char *env_var; /* Environment variable for default VOL connector */ + char *buf = NULL; /* Buffer for tokenizing string */ + H5VL_connector_t *connector = NULL; /* VOL connector */ + void *vol_info = NULL; /* VOL connector info */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE /* Reset default VOL connector, if it's set already */ /* (Can happen during testing -QAK) */ - if (H5VL_def_conn_s.connector_id > 0) { + if (H5VL_def_conn_s.connector) { /* Release the default VOL connector */ - (void)H5VL_conn_free(&H5VL_def_conn_s); - H5VL_def_conn_s.connector_id = -1; + (void)H5VL_conn_prop_free(&H5VL_def_conn_s); + H5VL_def_conn_s.connector = NULL; H5VL_def_conn_s.connector_info = NULL; } /* end if */ @@ -403,51 +382,51 @@ H5VL__set_def_conn(void) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't check if VOL connector already registered"); else if (connector_is_registered) { /* Retrieve the ID of the already-registered VOL connector */ - if ((connector_id = H5VL__get_connector_id_by_name(tok, false)) < 0) + if (NULL == (connector = H5VL__get_connector_by_name(tok))) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't get VOL connector ID"); } /* end else-if */ else { /* Check for VOL connectors that ship with the library */ if (!strcmp(tok, "native")) { - connector_id = H5VL_NATIVE; - if (H5I_inc_ref(connector_id, false) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTINC, FAIL, "can't increment VOL connector refcount"); + connector = H5VL_NATIVE_conn_g; + + /* Inc. refcount on connector object, so it can be uniformly released */ + H5VL_conn_inc_rc(connector); } /* end if */ else if (!strcmp(tok, "pass_through")) { - connector_id = H5VL_PASSTHRU; - if (H5I_inc_ref(connector_id, false) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTINC, FAIL, "can't increment VOL connector refcount"); + connector = H5VL_PASSTHRU_conn_g; + + /* Inc. 
refcount on connector object, so it can be uniformly released */ + H5VL_conn_inc_rc(connector); } /* end else-if */ else { /* Register the VOL connector */ /* (NOTE: No provisions for vipl_id currently) */ - if ((connector_id = H5VL__register_connector_by_name(tok, true, H5P_VOL_INITIALIZE_DEFAULT)) < - 0) + if (NULL == (connector = H5VL__register_connector_by_name(tok, H5P_VOL_INITIALIZE_DEFAULT))) HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, FAIL, "can't register connector"); } /* end else */ } /* end else */ /* Was there any connector info specified in the environment variable? */ if (NULL != (tok = HDstrtok_r(NULL, "\n\r", &lasts))) - if (H5VL__connector_str_to_info(tok, connector_id, &vol_info) < 0) + if (H5VL__connector_str_to_info(tok, connector, &vol_info) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTDECODE, FAIL, "can't deserialize connector info"); /* Set the default VOL connector */ - H5VL_def_conn_s.connector_id = connector_id; + H5VL_def_conn_s.connector = connector; H5VL_def_conn_s.connector_info = vol_info; } /* end if */ else { /* Set the default VOL connector */ - H5VL_def_conn_s.connector_id = H5_DEFAULT_VOL; + H5VL_def_conn_s.connector = H5_DEFAULT_VOL; H5VL_def_conn_s.connector_info = NULL; /* Increment the ref count on the default connector */ - if (H5I_inc_ref(H5VL_def_conn_s.connector_id, false) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTINC, FAIL, "can't increment VOL connector refcount"); + H5VL_conn_inc_rc(H5VL_def_conn_s.connector); } /* end else */ /* Get default file access pclass */ - if (NULL == (def_fapclass = (H5P_genclass_t *)H5I_object(H5P_FILE_ACCESS))) + if (NULL == (def_fapclass = H5I_object(H5P_FILE_ACCESS))) HGOTO_ERROR(H5E_VOL, H5E_BADID, FAIL, "can't find object for default file access property class ID"); /* Change the default VOL for the default file access pclass */ @@ -456,22 +435,22 @@ H5VL__set_def_conn(void) "can't set default VOL connector for default file access property class"); /* Get default file access plist */ - if (NULL == (def_fapl = (H5P_genplist_t *)H5I_object(H5P_FILE_ACCESS_DEFAULT))) + if (NULL == (def_fapl = H5I_object(H5P_FILE_ACCESS_DEFAULT))) HGOTO_ERROR(H5E_VOL, H5E_BADID, FAIL, "can't find object for default fapl ID"); /* Change the default VOL for the default FAPL */ - if (H5P_set_vol(def_fapl, H5VL_def_conn_s.connector_id, H5VL_def_conn_s.connector_info) < 0) + if (H5P_set_vol(def_fapl, H5VL_def_conn_s.connector, H5VL_def_conn_s.connector_info) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTSET, FAIL, "can't set default VOL connector for default FAPL"); done: /* Clean up on error */ if (ret_value < 0) { if (vol_info) - if (H5VL_free_connector_info(connector_id, vol_info) < 0) + if (H5VL_free_connector_info(connector, vol_info) < 0) HDONE_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "can't free VOL connector info"); - if (connector_id >= 0) - /* The H5VL_class_t struct will be freed by this function */ - if (H5I_dec_ref(connector_id) < 0) + if (connector) + /* The H5VL_connector_t struct will be freed by this function */ + if (H5VL_conn_dec_rc(connector) < 0) HDONE_ERROR(H5E_VOL, H5E_CANTDEC, FAIL, "unable to unregister VOL connector"); } /* end if */ @@ -522,7 +501,7 @@ H5VL__wrap_obj(void *obj, H5I_type_t obj_type) } /* end H5VL__wrap_obj() */ /*------------------------------------------------------------------------- - * Function: H5VL__new_vol_obj + * Function: H5VL_new_vol_obj * * Purpose: Creates a new VOL object, to use when registering an ID. 
* @@ -531,18 +510,18 @@ H5VL__wrap_obj(void *obj, H5I_type_t obj_type) * *------------------------------------------------------------------------- */ -static H5VL_object_t * -H5VL__new_vol_obj(H5I_type_t type, void *object, H5VL_t *vol_connector, bool wrap_obj) +H5VL_object_t * +H5VL_new_vol_obj(H5I_type_t type, void *object, H5VL_connector_t *connector, bool wrap_obj) { H5VL_object_t *new_vol_obj = NULL; /* Pointer to new VOL object */ bool conn_rc_incr = false; /* Whether the VOL connector refcount has been incremented */ H5VL_object_t *ret_value = NULL; /* Return value */ - FUNC_ENTER_PACKAGE + FUNC_ENTER_NOAPI(NULL) /* Check arguments */ assert(object); - assert(vol_connector); + assert(connector); /* Make sure type number is valid */ if (type != H5I_ATTR && type != H5I_DATASET && type != H5I_DATATYPE && type != H5I_FILE && @@ -552,7 +531,7 @@ H5VL__new_vol_obj(H5I_type_t type, void *object, H5VL_t *vol_connector, bool wra /* Create the new VOL object */ if (NULL == (new_vol_obj = H5FL_CALLOC(H5VL_object_t))) HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, NULL, "can't allocate memory for VOL object"); - new_vol_obj->connector = vol_connector; + new_vol_obj->connector = connector; if (wrap_obj) { if (NULL == (new_vol_obj->data = H5VL__wrap_obj(object, type))) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, NULL, "can't wrap library object"); @@ -562,7 +541,7 @@ H5VL__new_vol_obj(H5I_type_t type, void *object, H5VL_t *vol_connector, bool wra new_vol_obj->rc = 1; /* Bump the reference count on the VOL connector */ - H5VL_conn_inc_rc(vol_connector); + H5VL_conn_inc_rc(connector); conn_rc_incr = true; /* If this is a datatype, we have to hide the VOL object under the H5T_t pointer */ @@ -576,7 +555,7 @@ H5VL__new_vol_obj(H5I_type_t type, void *object, H5VL_t *vol_connector, bool wra done: /* Cleanup on error */ if (NULL == ret_value) { - if (conn_rc_incr && H5VL_conn_dec_rc(vol_connector) < 0) + if (conn_rc_incr && H5VL_conn_dec_rc(connector) < 0) HDONE_ERROR(H5E_VOL, H5E_CANTDEC, NULL, "unable to decrement ref count on VOL connector"); if (new_vol_obj) { @@ -587,12 +566,12 @@ H5VL__new_vol_obj(H5I_type_t type, void *object, H5VL_t *vol_connector, bool wra } /* end if */ FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL__new_vol_obj() */ +} /* end H5VL_new_vol_obj() */ /*------------------------------------------------------------------------- - * Function: H5VL_conn_copy + * Function: H5VL_conn_prop_copy * - * Purpose: Copy VOL connector ID & info. + * Purpose: Copy VOL connector property. * * Note: This is an "in-place" copy. 
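+ *
+ * For illustration only: a caller can shallow-copy a property and then let
+ * this routine take ownership of the copy by bumping the connector's
+ * refcount and duplicating the connector info ('orig_prop' is a
+ * hypothetical, already initialized property; error handling is sketched):
+ *
+ *              H5VL_connector_prop_t my_prop = orig_prop;
+ *
+ *              if (H5VL_conn_prop_copy(&my_prop) < 0)
+ *                  HGOTO_ERROR(H5E_VOL, H5E_CANTCOPY, FAIL, "can't copy connector property");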
* @@ -602,7 +581,7 @@ H5VL__new_vol_obj(H5I_type_t type, void *object, H5VL_t *vol_connector, bool wra *------------------------------------------------------------------------- */ herr_t -H5VL_conn_copy(H5VL_connector_prop_t *connector_prop) +H5VL_conn_prop_copy(H5VL_connector_prop_t *connector_prop) { herr_t ret_value = SUCCEED; /* Return value */ @@ -610,24 +589,17 @@ H5VL_conn_copy(H5VL_connector_prop_t *connector_prop) if (connector_prop) { /* Copy the connector ID & info, if there is one */ - if (connector_prop->connector_id > 0) { - /* Increment the reference count on connector ID and copy connector info */ - if (H5I_inc_ref(connector_prop->connector_id, false) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTINC, FAIL, - "unable to increment ref count on VOL connector ID"); + if (connector_prop->connector) { + /* Increment the reference count on connector */ + H5VL_conn_inc_rc(connector_prop->connector); /* Copy connector info, if it exists */ if (connector_prop->connector_info) { - H5VL_class_t *connector; /* Pointer to connector */ - void *new_connector_info = NULL; /* Copy of connector info */ - - /* Retrieve the connector for the ID */ - if (NULL == (connector = (H5VL_class_t *)H5I_object(connector_prop->connector_id))) - HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a VOL connector ID"); + void *new_connector_info = NULL; /* Copy of connector info */ /* Allocate and copy connector info */ - if (H5VL_copy_connector_info(connector, &new_connector_info, connector_prop->connector_info) < - 0) + if (H5VL_copy_connector_info(connector_prop->connector, &new_connector_info, + connector_prop->connector_info) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTCOPY, FAIL, "connector info copy failed"); /* Set the connector info to the copy */ @@ -638,12 +610,73 @@ H5VL_conn_copy(H5VL_connector_prop_t *connector_prop) done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL_conn_copy() */ +} /* end H5VL_conn_prop_copy() */ + +/*------------------------------------------------------------------------- + * Function: H5VL_conn_prop_cmp + * + * Purpose: Compare two VOL connector properties. + * + * Return: positive if PROP1 is greater than PROP2, negative if PROP2 + * is greater than PROP1 and zero if PROP1 and PROP2 are equal. 
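+ *
+ * For illustration only: a caller holding two connector properties might
+ * compare them roughly as follows ('prop_a' and 'prop_b' are hypothetical
+ * H5VL_connector_prop_t values initialized elsewhere):
+ *
+ *              int cmp = 0;
+ *
+ *              if (H5VL_conn_prop_cmp(&cmp, &prop_a, &prop_b) < 0)
+ *                  HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "can't compare connector properties");
+ *              if (0 == cmp)
+ *                  ... both properties use the same connector class and equal connector info ...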
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5VL_conn_prop_cmp(int *cmp_value, const H5VL_connector_prop_t *prop1, const H5VL_connector_prop_t *prop2)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Check arguments */
+ assert(cmp_value);
+ assert(prop1);
+ assert(prop2);
+
+ /* Fast check */
+ if (prop1 == prop2)
+ /* Set output comparison value */
+ *cmp_value = 0;
+ else {
+ H5VL_connector_t *conn1, *conn2; /* Connector for each property */
+ int tmp_cmp_value = 0; /* Value from comparison */
+
+ /* Compare connectors' classes */
+ conn1 = prop1->connector;
+ conn2 = prop2->connector;
+ if (H5VL_cmp_connector_cls(&tmp_cmp_value, conn1->cls, conn2->cls) < 0)
+ HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "can't compare connector classes");
+ if (tmp_cmp_value != 0)
+ /* Set output comparison value */
+ *cmp_value = tmp_cmp_value;
+ else {
+ /* At this point, we should be able to assume that we are dealing with
+ * the same connector class struct (or copies of the same class struct)
+ */
+
+ /* Use one of the classes' (cls1) info comparison routines to compare the
+ * info objects
+ */
+ assert(conn1->cls->info_cls.cmp == conn2->cls->info_cls.cmp);
+ tmp_cmp_value = 0;
+ if (H5VL_cmp_connector_info(conn1, &tmp_cmp_value, prop1->connector_info, prop2->connector_info) <
+ 0)
+ HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "can't compare connector class info");
+
+ /* Set output comparison value */
+ *cmp_value = tmp_cmp_value;
+ }
+ }
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5VL_conn_prop_cmp() */
/*-------------------------------------------------------------------------
- * Function: H5VL_conn_free
+ * Function: H5VL_conn_prop_free
- *
- * Purpose: Free VOL connector ID & info. 
+ * Purpose: Free VOL connector property * * Return: Success: Non-negative * Failure: Negative @@ -651,7 +684,7 @@ H5VL_conn_copy(H5VL_connector_prop_t *connector_prop) *------------------------------------------------------------------------- */ herr_t -H5VL_conn_free(const H5VL_connector_prop_t *connector_prop) +H5VL_conn_prop_free(const H5VL_connector_prop_t *connector_prop) { herr_t ret_value = SUCCEED; /* Return value */ @@ -659,23 +692,22 @@ H5VL_conn_free(const H5VL_connector_prop_t *connector_prop) if (connector_prop) { /* Free the connector info (if it exists) and decrement the ID */ - if (connector_prop->connector_id > 0) { + if (connector_prop->connector) { if (connector_prop->connector_info) /* Free the connector info */ - if (H5VL_free_connector_info(connector_prop->connector_id, connector_prop->connector_info) < - 0) + if (H5VL_free_connector_info(connector_prop->connector, connector_prop->connector_info) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "unable to release VOL connector info object"); /* Decrement reference count for connector ID */ - if (H5I_dec_ref(connector_prop->connector_id) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTDEC, FAIL, "can't decrement reference count for connector ID"); + if (H5VL_conn_dec_rc(connector_prop->connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTDEC, FAIL, "can't decrement reference count for connector"); } } done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL_conn_free() */ +} /* end H5VL_conn_prop_free() */ /*------------------------------------------------------------------------- * Function: H5VL_register @@ -690,7 +722,7 @@ H5VL_conn_free(const H5VL_connector_prop_t *connector_prop) *------------------------------------------------------------------------- */ hid_t -H5VL_register(H5I_type_t type, void *object, H5VL_t *vol_connector, bool app_ref) +H5VL_register(H5I_type_t type, void *object, H5VL_connector_t *vol_connector, bool app_ref) { H5VL_object_t *vol_obj = NULL; /* VOL object wrapper for library object */ hid_t ret_value = H5I_INVALID_HID; /* Return value */ @@ -703,7 +735,7 @@ H5VL_register(H5I_type_t type, void *object, H5VL_t *vol_connector, bool app_ref /* Set up VOL object for the passed-in data */ /* (Does not wrap object, since it's from a VOL callback) */ - if (NULL == (vol_obj = H5VL__new_vol_obj(type, object, vol_connector, false))) + if (NULL == (vol_obj = H5VL_new_vol_obj(type, object, vol_connector, false))) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, H5I_INVALID_HID, "can't create VOL object"); /* Register VOL object as _object_ type, for future object API calls */ @@ -733,7 +765,7 @@ H5VL_register(H5I_type_t type, void *object, H5VL_t *vol_connector, bool app_ref *------------------------------------------------------------------------- */ herr_t -H5VL_register_using_existing_id(H5I_type_t type, void *object, H5VL_t *vol_connector, bool app_ref, +H5VL_register_using_existing_id(H5I_type_t type, void *object, H5VL_connector_t *vol_connector, bool app_ref, hid_t existing_id) { H5VL_object_t *new_vol_obj = NULL; /* Pointer to new VOL object */ @@ -747,7 +779,7 @@ H5VL_register_using_existing_id(H5I_type_t type, void *object, H5VL_t *vol_conne /* Set up VOL object for the passed-in data */ /* (Wraps object, since it's a library object) */ - if (NULL == (new_vol_obj = H5VL__new_vol_obj(type, object, vol_connector, true))) + if (NULL == (new_vol_obj = H5VL_new_vol_obj(type, object, vol_connector, true))) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, FAIL, "can't create VOL object"); /* Call the underlying H5I function to complete the 
registration */ @@ -759,189 +791,159 @@ H5VL_register_using_existing_id(H5I_type_t type, void *object, H5VL_t *vol_conne } /* end H5VL_register_using_existing_id() */ /*------------------------------------------------------------------------- - * Function: H5VL_new_connector + * Function: H5VL_create_object * - * Purpose: Utility function to create a connector for a connector ID. + * Purpose: Similar to H5VL_register but does not create an ID. + * Creates a new VOL object for the provided generic object + * using the provided vol connector. Should only be used for + * internal objects returned from the connector such as + * requests. * - * Return: Success: Pointer to a new connector object + * Return: Success: A valid VOL object * Failure: NULL * *------------------------------------------------------------------------- */ -H5VL_t * -H5VL_new_connector(hid_t connector_id) +H5VL_object_t * +H5VL_create_object(void *object, H5VL_connector_t *vol_connector) { - H5VL_class_t *cls = NULL; /* VOL connector class */ - H5VL_t *connector = NULL; /* New VOL connector struct */ - bool conn_id_incr = false; /* Whether the VOL connector ID has been incremented */ - H5VL_t *ret_value = NULL; /* Return value */ + H5VL_object_t *ret_value = NULL; /* Return value */ FUNC_ENTER_NOAPI(NULL) - /* Get the VOL class object from the connector's ID */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) - HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, NULL, "not a VOL connector ID"); + /* Check arguments */ + assert(object); + assert(vol_connector); - /* Setup VOL info struct */ - if (NULL == (connector = H5FL_CALLOC(H5VL_t))) - HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, NULL, "can't allocate VOL connector struct"); - connector->cls = cls; - connector->id = connector_id; - if (H5I_inc_ref(connector->id, false) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTINC, NULL, "unable to increment ref count on VOL connector"); - conn_id_incr = true; + /* Set up VOL object for the passed-in data */ + /* (Does not wrap object, since it's from a VOL callback) */ + if (NULL == (ret_value = H5FL_CALLOC(H5VL_object_t))) + HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, NULL, "can't allocate memory for VOL object"); + ret_value->connector = vol_connector; + ret_value->data = object; + ret_value->rc = 1; - /* Set return value */ - ret_value = connector; + /* Bump the reference count on the VOL connector */ + H5VL_conn_inc_rc(vol_connector); done: - /* Clean up on error */ - if (NULL == ret_value) { - /* Decrement VOL connector ID ref count on error */ - if (conn_id_incr && H5I_dec_ref(connector_id) < 0) - HDONE_ERROR(H5E_VOL, H5E_CANTDEC, NULL, "unable to decrement ref count on VOL connector"); - - /* Free VOL connector struct */ - if (NULL != connector) - connector = H5FL_FREE(H5VL_t, connector); - } /* end if */ - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL_new_connector() */ +} /* end H5VL_create_object() */ /*------------------------------------------------------------------------- - * Function: H5VL_register_using_vol_id + * Function: H5VL__conn_create * - * Purpose: Utility function to create a user ID for an object created - * or opened through the VOL. Uses the VOL connector's ID to - * get the connector information instead of it being passed in. 
+ * Purpose: Utility function to create a connector around a class * - * Return: Success: A valid HDF5 ID - * Failure: H5I_INVALID_HID + * Return: Success: Pointer to a new connector object + * Failure: NULL * *------------------------------------------------------------------------- */ -hid_t -H5VL_register_using_vol_id(H5I_type_t type, void *obj, hid_t connector_id, bool app_ref) +static H5VL_connector_t * +H5VL__conn_create(H5VL_class_t *cls) { - H5VL_t *connector = NULL; /* VOL connector struct */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; /* New VOL connector struct */ + H5VL_connector_t *ret_value = NULL; /* Return value */ - FUNC_ENTER_NOAPI(FAIL) + FUNC_ENTER_PACKAGE - /* Create new VOL connector object, using the connector ID */ - if (NULL == (connector = H5VL_new_connector(connector_id))) - HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, H5I_INVALID_HID, "can't create VOL connector object"); + /* Sanity check */ + assert(cls); - /* Get an ID for the VOL object */ - if ((ret_value = H5VL_register(type, obj, connector, app_ref)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register object handle"); + /* Setup VOL info struct */ + if (NULL == (connector = H5FL_CALLOC(H5VL_connector_t))) + HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, NULL, "can't allocate VOL connector struct"); + connector->cls = cls; -done: - /* Clean up on error */ - if (H5I_INVALID_HID == ret_value) - /* Release newly created connector */ - if (connector && H5VL_conn_dec_rc(connector) < 0) - HDONE_ERROR(H5E_VOL, H5E_CANTDEC, H5I_INVALID_HID, - "unable to decrement ref count on VOL connector") + /* Add connector to list of active VOL connectors */ + if (H5VL_conn_list_head_g) { + connector->next = H5VL_conn_list_head_g; + H5VL_conn_list_head_g->prev = connector; + } + H5VL_conn_list_head_g = connector; + /* Set return value */ + ret_value = connector; + +done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL_register_using_vol_id() */ +} /* end H5VL__conn_create() */ /*------------------------------------------------------------------------- - * Function: H5VL_create_object + * Function: H5VL_conn_register * - * Purpose: Similar to H5VL_register but does not create an ID. - * Creates a new VOL object for the provided generic object - * using the provided vol connector. Should only be used for - * internal objects returned from the connector such as - * requests. 
+ * Purpose: Registers an existing VOL connector with a new ID
 *
- * Return: Success: A valid VOL object
- * Failure: NULL
+ * Return: Success: VOL connector ID
+ * Failure: H5I_INVALID_HID
 *
 *-------------------------------------------------------------------------
 */
-H5VL_object_t *
-H5VL_create_object(void *object, H5VL_t *vol_connector)
+hid_t
+H5VL_conn_register(H5VL_connector_t *connector)
 {
- H5VL_object_t *ret_value = NULL; /* Return value */
+ hid_t ret_value = H5I_INVALID_HID;
- FUNC_ENTER_NOAPI(NULL)
+ FUNC_ENTER_NOAPI(H5I_INVALID_HID)
 /* Check arguments */
- assert(object);
- assert(vol_connector);
+ assert(connector);
- /* Set up VOL object for the passed-in data */
- /* (Does not wrap object, since it's from a VOL callback) */
- if (NULL == (ret_value = H5FL_CALLOC(H5VL_object_t)))
- HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, NULL, "can't allocate memory for VOL object");
- ret_value->connector = vol_connector;
- ret_value->data = object;
- ret_value->rc = 1;
+ /* Create an ID for the connector */
+ if ((ret_value = H5I_register(H5I_VOL, connector, true)) < 0)
+ HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector ID");
- /* Bump the reference count on the VOL connector */
- H5VL_conn_inc_rc(vol_connector);
+ /* ID is holding a reference to the connector */
+ H5VL_conn_inc_rc(connector);
 done:
 FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5VL_create_object() */
+} /* end H5VL_conn_register() */
/*-------------------------------------------------------------------------
- * Function: H5VL_create_object_using_vol_id
+ * Function: H5VL__conn_find
 *
- * Purpose: Similar to H5VL_register_using_vol_id but does not create
- * an id. Intended for use by internal library routines,
- * therefore it wraps the object.
+ * Purpose: Find a matching connector
 *
- * Return: Success: VOL object pointer
- * Failure: NULL
+ * Return: SUCCEED/FAIL
 *
 *-------------------------------------------------------------------------
 */
-H5VL_object_t *
-H5VL_create_object_using_vol_id(H5I_type_t type, void *obj, hid_t connector_id)
+static herr_t
+H5VL__conn_find(H5PL_vol_key_t *key, H5VL_connector_t **connector)
 {
- H5VL_class_t *cls = NULL; /* VOL connector class */
- H5VL_t *connector = NULL; /* VOL connector struct */
- bool conn_id_incr = false; /* Whether the VOL connector ID has been incremented */
- H5VL_object_t *ret_value = NULL; /* Return value */
-
- FUNC_ENTER_NOAPI(NULL)
-
- /* Get the VOL class object from the connector's ID */
- if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL)))
- HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, NULL, "not a VOL connector ID");
+ H5VL_connector_t *node; /* Current node in linked list */
- /* Setup VOL info struct */
- if (NULL == (connector = H5FL_CALLOC(H5VL_t)))
- HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, NULL, "can't allocate VOL info struct");
- connector->cls = cls;
- connector->id = connector_id;
- if (H5I_inc_ref(connector->id, false) < 0)
- HGOTO_ERROR(H5E_VOL, H5E_CANTINC, NULL, "unable to increment ref count on VOL connector");
- conn_id_incr = true;
+ FUNC_ENTER_PACKAGE_NOERR
- /* Set up VOL object for the passed-in data */
- /* (Wraps object, since it's a library object) */
- if (NULL == (ret_value = H5VL__new_vol_obj(type, obj, connector, true)))
- HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, NULL, "can't create VOL object");
+ /* Check arguments */
+ assert(key);
+ assert(connector);
-done:
- /* Clean up on error */
- if (!ret_value) {
- /* Decrement VOL connector ID ref count on error */
- if (conn_id_incr && 
H5I_dec_ref(connector_id) < 0) - HDONE_ERROR(H5E_VOL, H5E_CANTDEC, NULL, "unable to decrement ref count on VOL connector"); + /* Iterate over linked list of active connectors */ + node = H5VL_conn_list_head_g; + while (node) { + if (H5VL_GET_CONNECTOR_BY_NAME == key->kind) { + if (0 == strcmp(node->cls->name, key->u.name)) { + *connector = node; + break; + } /* end if */ + } /* end if */ + else { + assert(H5VL_GET_CONNECTOR_BY_VALUE == key->kind); + if (node->cls->value == key->u.value) { + *connector = node; + break; + } /* end if */ + } /* end else */ - /* Free VOL connector struct */ - if (NULL != connector) - connector = H5FL_FREE(H5VL_t, connector); - } /* end if */ + /* Advance to next node */ + node = node->next; + } - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL_create_object_using_vol_id() */ + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5VL__conn_find() */ /*------------------------------------------------------------------------- * Function: H5VL_conn_inc_rc @@ -953,7 +955,7 @@ H5VL_create_object_using_vol_id(H5I_type_t type, void *obj, hid_t connector_id) *------------------------------------------------------------------------- */ int64_t -H5VL_conn_inc_rc(H5VL_t *connector) +H5VL_conn_inc_rc(H5VL_connector_t *connector) { int64_t ret_value = -1; @@ -965,6 +967,7 @@ H5VL_conn_inc_rc(H5VL_t *connector) /* Increment refcount for connector */ connector->nrefs++; + /* Set return value */ ret_value = connector->nrefs; FUNC_LEAVE_NOAPI(ret_value) @@ -980,7 +983,7 @@ H5VL_conn_inc_rc(H5VL_t *connector) *------------------------------------------------------------------------- */ int64_t -H5VL_conn_dec_rc(H5VL_t *connector) +H5VL_conn_dec_rc(H5VL_connector_t *connector) { int64_t ret_value = -1; /* Return value */ @@ -992,23 +995,123 @@ H5VL_conn_dec_rc(H5VL_t *connector) /* Decrement refcount for connector */ connector->nrefs--; - /* Check for last reference */ - if (0 == connector->nrefs) { - if (H5I_dec_ref(connector->id) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTDEC, FAIL, "unable to decrement ref count on VOL connector"); - H5FL_FREE(H5VL_t, connector); + /* Set return value */ + ret_value = connector->nrefs; - /* Set return value */ - ret_value = 0; - } /* end if */ - else - /* Set return value */ - ret_value = connector->nrefs; + /* Check for last reference */ + if (0 == connector->nrefs) + if (H5VL__conn_free(connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "unable to free VOL connector"); done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5VL_conn_dec_rc() */ +/*------------------------------------------------------------------------- + * Function: H5VL_conn_same_class + * + * Purpose: Determine if two connectors point to the same VOL class + * + * Return: TRUE/FALSE/FAIL + * + *------------------------------------------------------------------------- + */ +htri_t +H5VL_conn_same_class(const H5VL_connector_t *conn1, const H5VL_connector_t *conn2) +{ + htri_t ret_value = FAIL; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Check arguments */ + assert(conn1); + assert(conn2); + + /* Fast check */ + if (conn1 == conn2) + HGOTO_DONE(TRUE); + else { + int cmp_value; /* Comparison result */ + + /* Compare connector classes */ + if (H5VL_cmp_connector_cls(&cmp_value, conn1->cls, conn2->cls) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); + ret_value = (0 == cmp_value); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5VL_conn_same_class() */ + +/*------------------------------------------------------------------------- + * Function: 
H5VL__conn_free + * + * Purpose: Free a connector object + * + * Return: SUCCEED/FAIL + * + *------------------------------------------------------------------------- + */ +static herr_t +H5VL__conn_free(H5VL_connector_t *connector) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments */ + assert(connector); + assert(0 == connector->nrefs); + + /* Remove connector from list of active VOL connectors */ + if (H5VL_conn_list_head_g == connector) { + H5VL_conn_list_head_g = H5VL_conn_list_head_g->next; + if (H5VL_conn_list_head_g) + H5VL_conn_list_head_g->prev = NULL; + } + else { + if (connector->prev) + connector->prev->next = connector->next; + if (connector->next) + connector->next->prev = connector->prev; + } + + if (H5VL__free_cls(connector->cls) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTRELEASE, FAIL, "can't free VOL class"); + + H5FL_FREE(H5VL_connector_t, connector); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5VL__conn_free() */ + +/*------------------------------------------------------------------------- + * Function: H5VL__conn_free_id + * + * Purpose: Shim for freeing connector ID + * + * Return: SUCCEED/FAIL + * + *------------------------------------------------------------------------- + */ +static herr_t +H5VL__conn_free_id(H5VL_connector_t *connector, void H5_ATTR_UNUSED **request) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments */ + assert(connector); + + /* Decrement refcount on connector */ + if (H5VL_conn_dec_rc(connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTDEC, FAIL, "unable to decrement ref count on VOL connector"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5VL__conn_free_id() */ + /*------------------------------------------------------------------------- * Function: H5VL_object_inc_rc * @@ -1076,7 +1179,7 @@ herr_t H5VL_object_is_native(const H5VL_object_t *obj, bool *is_native) { const H5VL_class_t *cls; /* VOL connector class structs for object */ - const H5VL_class_t *native_cls; /* Native VOL connector class structs */ + H5VL_connector_t *native; /* Native VOL connector */ int cmp_value; /* Comparison result */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1092,11 +1195,10 @@ H5VL_object_is_native(const H5VL_object_t *obj, bool *is_native) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't get VOL connector class"); /* Retrieve the native connector class */ - if (NULL == (native_cls = (H5VL_class_t *)H5I_object_verify(H5VL_NATIVE, H5I_VOL))) - HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't retrieve native VOL connector class"); + native = H5VL_NATIVE_conn_g; /* Compare connector classes */ - if (H5VL_cmp_connector_cls(&cmp_value, cls, native_cls) < 0) + if (H5VL_cmp_connector_cls(&cmp_value, cls, native->cls) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); /* If classes compare equal, then the object is / is in a native connector's file */ @@ -1173,19 +1275,18 @@ H5VL_file_is_same(const H5VL_object_t *vol_obj1, const H5VL_object_t *vol_obj2, * Purpose: Registers a new VOL connector as a member of the virtual object * layer class. * - * Return: Success: A VOL connector ID which is good until the - * library is closed or the connector is unregistered. 
- * - * Failure: H5I_INVALID_HID + * Return: Success: A pointer to a VOL connector + * Failure: NULL * *------------------------------------------------------------------------- */ -hid_t -H5VL__register_connector(const void *_cls, bool app_ref, hid_t vipl_id) +H5VL_connector_t * +H5VL__register_connector(const H5VL_class_t *cls, hid_t vipl_id) { - const H5VL_class_t *cls = (const H5VL_class_t *)_cls; - H5VL_class_t *saved = NULL; - hid_t ret_value = H5I_INVALID_HID; + H5VL_connector_t *connector = NULL; + H5VL_class_t *saved = NULL; + bool init_done = false; + H5VL_connector_t *ret_value = NULL; FUNC_ENTER_PACKAGE @@ -1194,27 +1295,38 @@ H5VL__register_connector(const void *_cls, bool app_ref, hid_t vipl_id) /* Copy the class structure so the caller can reuse or free it */ if (NULL == (saved = H5FL_MALLOC(H5VL_class_t))) - HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, H5I_INVALID_HID, - "memory allocation failed for VOL connector class struct"); + HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, NULL, "memory allocation failed for VOL connector class struct"); H5MM_memcpy(saved, cls, sizeof(H5VL_class_t)); if (NULL == (saved->name = H5MM_strdup(cls->name))) - HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, H5I_INVALID_HID, - "memory allocation failed for VOL connector name"); + HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, NULL, "memory allocation failed for VOL connector name"); /* Initialize the VOL connector */ if (cls->initialize && cls->initialize(vipl_id) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, H5I_INVALID_HID, "unable to init VOL connector"); + HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, NULL, "unable to init VOL connector"); + init_done = true; - /* Create the new class ID */ - if ((ret_value = H5I_register(H5I_VOL, saved, app_ref)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector ID"); + /* Create new connector for the class */ + if (NULL == (connector = H5VL__conn_create(saved))) + HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, NULL, "unable to create VOL connector"); -done: - if (ret_value < 0 && saved) { - if (saved->name) - H5MM_xfree_const(saved->name); + /* Set return value */ + ret_value = connector; - H5FL_FREE(H5VL_class_t, saved); +done: + if (NULL == ret_value) { + if (connector) { + if (H5VL__conn_free(connector) < 0) + HDONE_ERROR(H5E_VOL, H5E_CANTRELEASE, NULL, "can't free VOL connector"); + } + else if (init_done) { + if (H5VL__free_cls(saved) < 0) + HDONE_ERROR(H5E_VOL, H5E_CANTRELEASE, NULL, "can't free VOL class"); + } + else if (saved) { + if (saved->name) + H5MM_xfree_const(saved->name); + H5FL_FREE(H5VL_class_t, saved); + } } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -1226,64 +1338,56 @@ H5VL__register_connector(const void *_cls, bool app_ref, hid_t vipl_id) * Purpose: Registers a new VOL connector as a member of the virtual object * layer class. * - * Return: Success: A VOL connector ID which is good until the - * library is closed or the connector is - * unregistered. 
- * - * Failure: H5I_INVALID_HID + * Return: Success: A pointer to a VOL connector + * Failure: NULL * *------------------------------------------------------------------------- */ -hid_t -H5VL__register_connector_by_class(const H5VL_class_t *cls, bool app_ref, hid_t vipl_id) +H5VL_connector_t * +H5VL__register_connector_by_class(const H5VL_class_t *cls, hid_t vipl_id) { - H5VL_get_connector_ud_t op_data; /* Callback info for connector search */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; /* Connector for class */ + H5PL_vol_key_t key; /* Info for connector search */ + H5VL_connector_t *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE /* Check arguments */ if (!cls) - HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, H5I_INVALID_HID, - "VOL connector class pointer cannot be NULL"); + HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, NULL, "VOL connector class pointer cannot be NULL"); if (H5VL_VERSION != cls->version) - HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "VOL connector has incompatible version"); + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, NULL, "VOL connector has incompatible version"); if (!cls->name) - HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, - "VOL connector class name cannot be the NULL pointer"); + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, NULL, "VOL connector class name cannot be the NULL pointer"); if (0 == strlen(cls->name)) - HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, - "VOL connector class name cannot be the empty string"); + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, NULL, "VOL connector class name cannot be the empty string"); if (cls->info_cls.copy && !cls->info_cls.free) HGOTO_ERROR( - H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, + H5E_VOL, H5E_CANTREGISTER, NULL, "VOL connector must provide free callback for VOL info objects when a copy callback is provided"); if (cls->wrap_cls.get_wrap_ctx && !cls->wrap_cls.free_wrap_ctx) - HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, NULL, "VOL connector must provide free callback for object wrapping contexts when a get " "callback is provided"); - /* Set up op data for iteration */ - op_data.key.kind = H5VL_GET_CONNECTOR_BY_NAME; - op_data.key.u.name = cls->name; - op_data.found_id = H5I_INVALID_HID; + /* Set up data for find */ + key.kind = H5VL_GET_CONNECTOR_BY_NAME; + key.u.name = cls->name; /* Check if connector is already registered */ - if (H5I_iterate(H5I_VOL, H5VL__get_connector_cb, &op_data, true) < 0) - HGOTO_ERROR(H5E_VOL, H5E_BADITER, H5I_INVALID_HID, "can't iterate over VOL IDs"); - - /* Increment the ref count on the existing VOL connector ID, if it's already registered */ - if (op_data.found_id != H5I_INVALID_HID) { - if (H5I_inc_ref(op_data.found_id, app_ref) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTINC, H5I_INVALID_HID, - "unable to increment ref count on VOL connector"); - ret_value = op_data.found_id; - } /* end if */ - else { - /* Create a new class ID */ - if ((ret_value = H5VL__register_connector(cls, app_ref, vipl_id)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector"); - } /* end else */ + if (H5VL__conn_find(&key, &connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTFIND, NULL, "can't search VOL connectors"); + + /* If not found, create a new connector */ + if (NULL == connector) + if (NULL == (connector = H5VL__register_connector(cls, vipl_id))) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, NULL, "unable to register VOL connector"); + + /* Inc. 
refcount on connector object, so it can be uniformly released */ + H5VL_conn_inc_rc(connector); + + /* Set return value */ + ret_value = connector; done: FUNC_LEAVE_NOAPI(ret_value) @@ -1295,52 +1399,49 @@ H5VL__register_connector_by_class(const H5VL_class_t *cls, bool app_ref, hid_t v * Purpose: Registers a new VOL connector as a member of the virtual object * layer class. * - * Return: Success: A VOL connector ID which is good until the - * library is closed or the connector is - * unregistered. - * - * Failure: H5I_INVALID_HID + * Return: Success: A pointer to a VOL connector + * Failure: NULL * *------------------------------------------------------------------------- */ -hid_t -H5VL__register_connector_by_name(const char *name, bool app_ref, hid_t vipl_id) +H5VL_connector_t * +H5VL__register_connector_by_name(const char *name, hid_t vipl_id) { - H5VL_get_connector_ud_t op_data; /* Callback info for connector search */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; /* Connector for class */ + H5PL_vol_key_t key; /* Info for connector search */ + H5VL_connector_t *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE - /* Set up op data for iteration */ - op_data.key.kind = H5VL_GET_CONNECTOR_BY_NAME; - op_data.key.u.name = name; - op_data.found_id = H5I_INVALID_HID; + /* Set up data for find */ + key.kind = H5VL_GET_CONNECTOR_BY_NAME; + key.u.name = name; /* Check if connector is already registered */ - if (H5I_iterate(H5I_VOL, H5VL__get_connector_cb, &op_data, app_ref) < 0) - HGOTO_ERROR(H5E_VOL, H5E_BADITER, H5I_INVALID_HID, "can't iterate over VOL ids"); - - /* If connector already registered, increment ref count on ID and return ID */ - if (op_data.found_id != H5I_INVALID_HID) { - if (H5I_inc_ref(op_data.found_id, app_ref) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTINC, H5I_INVALID_HID, - "unable to increment ref count on VOL connector"); - ret_value = op_data.found_id; - } /* end if */ - else { - H5PL_key_t key; + if (H5VL__conn_find(&key, &connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTFIND, NULL, "can't search VOL connectors"); + + /* If not found, create a new connector */ + if (NULL == connector) { + H5PL_key_t plugin_key; const H5VL_class_t *cls; /* Try loading the connector */ - key.vol.kind = H5VL_GET_CONNECTOR_BY_NAME; - key.vol.u.name = name; - if (NULL == (cls = (const H5VL_class_t *)H5PL_load(H5PL_TYPE_VOL, &key))) - HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, H5I_INVALID_HID, "unable to load VOL connector"); - - /* Register the connector we loaded */ - if ((ret_value = H5VL__register_connector(cls, app_ref, vipl_id)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector ID"); - } /* end else */ + plugin_key.vol.kind = H5VL_GET_CONNECTOR_BY_NAME; + plugin_key.vol.u.name = name; + if (NULL == (cls = H5PL_load(H5PL_TYPE_VOL, &plugin_key))) + HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, NULL, "unable to load VOL connector"); + + /* Create a connector for the class we loaded */ + if (NULL == (connector = H5VL__register_connector(cls, vipl_id))) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, NULL, "unable to register VOL connector"); + } /* end if */ + + /* Inc. 
refcount on connector object, so it can be uniformly released */ + H5VL_conn_inc_rc(connector); + + /* Set return value */ + ret_value = connector; done: FUNC_LEAVE_NOAPI(ret_value) @@ -1352,52 +1453,49 @@ H5VL__register_connector_by_name(const char *name, bool app_ref, hid_t vipl_id) * Purpose: Registers a new VOL connector as a member of the virtual object * layer class. * - * Return: Success: A VOL connector ID which is good until the - * library is closed or the connector is - * unregistered. - * - * Failure: H5I_INVALID_HID + * Return: Success: A pointer to a VOL connector + * Failure: NULL * *------------------------------------------------------------------------- */ -hid_t -H5VL__register_connector_by_value(H5VL_class_value_t value, bool app_ref, hid_t vipl_id) +H5VL_connector_t * +H5VL__register_connector_by_value(H5VL_class_value_t value, hid_t vipl_id) { - H5VL_get_connector_ud_t op_data; /* Callback info for connector search */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; /* Connector for class */ + H5PL_vol_key_t key; /* Info for connector search */ + H5VL_connector_t *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE - /* Set up op data for iteration */ - op_data.key.kind = H5VL_GET_CONNECTOR_BY_VALUE; - op_data.key.u.value = value; - op_data.found_id = H5I_INVALID_HID; + /* Set up data for find */ + key.kind = H5VL_GET_CONNECTOR_BY_VALUE; + key.u.value = value; /* Check if connector is already registered */ - if (H5I_iterate(H5I_VOL, H5VL__get_connector_cb, &op_data, app_ref) < 0) - HGOTO_ERROR(H5E_VOL, H5E_BADITER, H5I_INVALID_HID, "can't iterate over VOL ids"); - - /* If connector already registered, increment ref count on ID and return ID */ - if (op_data.found_id != H5I_INVALID_HID) { - if (H5I_inc_ref(op_data.found_id, app_ref) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTINC, H5I_INVALID_HID, - "unable to increment ref count on VOL connector"); - ret_value = op_data.found_id; - } /* end if */ - else { - H5PL_key_t key; + if (H5VL__conn_find(&key, &connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTFIND, NULL, "can't search VOL connectors"); + + /* If not found, create a new connector */ + if (NULL == connector) { + H5PL_key_t plugin_key; const H5VL_class_t *cls; /* Try loading the connector */ - key.vol.kind = H5VL_GET_CONNECTOR_BY_VALUE; - key.vol.u.value = value; - if (NULL == (cls = (const H5VL_class_t *)H5PL_load(H5PL_TYPE_VOL, &key))) - HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, H5I_INVALID_HID, "unable to load VOL connector"); - - /* Register the connector we loaded */ - if ((ret_value = H5VL__register_connector(cls, app_ref, vipl_id)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register VOL connector ID"); - } /* end else */ + plugin_key.vol.kind = H5VL_GET_CONNECTOR_BY_VALUE; + plugin_key.vol.u.value = value; + if (NULL == (cls = H5PL_load(H5PL_TYPE_VOL, &plugin_key))) + HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, NULL, "unable to load VOL connector"); + + /* Create a connector for the class we loaded */ + if (NULL == (connector = H5VL__register_connector(cls, vipl_id))) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, NULL, "unable to register VOL connector ID"); + } /* end if */ + + /* Inc. 
refcount on connector object, so it can be uniformly released */ + H5VL_conn_inc_rc(connector); + + /* Set return value */ + ret_value = connector; done: FUNC_LEAVE_NOAPI(ret_value) @@ -1417,22 +1515,22 @@ H5VL__register_connector_by_value(H5VL_class_value_t value, bool app_ref, hid_t htri_t H5VL__is_connector_registered_by_name(const char *name) { - H5VL_get_connector_ud_t op_data; /* Callback info for connector search */ - htri_t ret_value = false; /* Return value */ + H5VL_connector_t *connector = NULL; /* Connector for class */ + H5PL_vol_key_t key; /* Info for connector search */ + htri_t ret_value = false; /* Return value */ FUNC_ENTER_PACKAGE - /* Set up op data for iteration */ - op_data.key.kind = H5VL_GET_CONNECTOR_BY_NAME; - op_data.key.u.name = name; - op_data.found_id = H5I_INVALID_HID; + /* Set up data for find */ + key.kind = H5VL_GET_CONNECTOR_BY_NAME; + key.u.name = name; /* Find connector with name */ - if (H5I_iterate(H5I_VOL, H5VL__get_connector_cb, &op_data, true) < 0) - HGOTO_ERROR(H5E_VOL, H5E_BADITER, FAIL, "can't iterate over VOL connectors"); + if (H5VL__conn_find(&key, &connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTFIND, FAIL, "can't search VOL connectors"); /* Found a connector with that name */ - if (op_data.found_id != H5I_INVALID_HID) + if (connector) ret_value = true; done: @@ -1454,22 +1552,22 @@ H5VL__is_connector_registered_by_name(const char *name) htri_t H5VL__is_connector_registered_by_value(H5VL_class_value_t value) { - H5VL_get_connector_ud_t op_data; /* Callback info for connector search */ - htri_t ret_value = false; /* Return value */ + H5VL_connector_t *connector = NULL; /* Connector for class */ + H5PL_vol_key_t key; /* Info for connector search */ + htri_t ret_value = false; /* Return value */ FUNC_ENTER_PACKAGE - /* Set up op data for iteration */ - op_data.key.kind = H5VL_GET_CONNECTOR_BY_VALUE; - op_data.key.u.value = value; - op_data.found_id = H5I_INVALID_HID; + /* Set up data for find */ + key.kind = H5VL_GET_CONNECTOR_BY_VALUE; + key.u.value = value; /* Find connector with value */ - if (H5I_iterate(H5I_VOL, H5VL__get_connector_cb, &op_data, true) < 0) - HGOTO_ERROR(H5E_VOL, H5E_BADITER, FAIL, "can't iterate over VOL connectors"); + if (H5VL__conn_find(&key, &connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTFIND, FAIL, "can't search VOL connectors"); - /* Found a connector with that name */ - if (op_data.found_id != H5I_INVALID_HID) + /* Found a connector with that value */ + if (connector) ret_value = true; done: @@ -1477,245 +1575,109 @@ H5VL__is_connector_registered_by_value(H5VL_class_value_t value) } /* end H5VL__is_connector_registered_by_value() */ /*------------------------------------------------------------------------- - * Function: H5VL__get_connector_id + * Function: H5VL__get_connector_by_name * - * Purpose: Retrieves the VOL connector ID for a given object ID. + * Purpose: Looks up a connector by its class name. 
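+ *
+ * For illustration only: an internal caller might look a connector up and
+ * later drop the reference this routine takes ("pass_through" is just an
+ * example name; error handling is sketched):
+ *
+ *              H5VL_connector_t *conn;
+ *
+ *              if (NULL == (conn = H5VL__get_connector_by_name("pass_through")))
+ *                  HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "VOL connector not registered");
+ *              ... use conn->cls ...
+ *              if (H5VL_conn_dec_rc(conn) < 0)
+ *                  HGOTO_ERROR(H5E_VOL, H5E_CANTDEC, FAIL, "can't release VOL connector");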
* - * Return: Positive if the VOL class has been registered - * Negative on error (if the class is not a valid class or not registered) + * Return: Pointer to the connector if the VOL class has been registered + * NULL on error (if the class is not a valid class or not registered) * *------------------------------------------------------------------------- */ -hid_t -H5VL__get_connector_id(hid_t obj_id, bool is_api) +H5VL_connector_t * +H5VL__get_connector_by_name(const char *name) { - H5VL_object_t *vol_obj = NULL; - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; /* Connector for class */ + H5PL_vol_key_t key; /* Info for connector search */ + H5VL_connector_t *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE - /* Get the underlying VOL object for the object ID */ - if (NULL == (vol_obj = H5VL_vol_object(obj_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); - - /* Return the VOL object's VOL class ID */ - ret_value = vol_obj->connector->id; - if (H5I_inc_ref(ret_value, is_api) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTINC, H5I_INVALID_HID, "unable to increment ref count on VOL connector"); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL__get_connector_id() */ - -/*------------------------------------------------------------------------- - * Function: H5VL__get_connector_id_by_name - * - * Purpose: Retrieves the ID for a registered VOL connector. - * - * Return: Positive if the VOL class has been registered - * Negative on error (if the class is not a valid class or not registered) - * - *------------------------------------------------------------------------- - */ -hid_t -H5VL__get_connector_id_by_name(const char *name, bool is_api) -{ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ - - FUNC_ENTER_PACKAGE + /* Set up data for find */ + key.kind = H5VL_GET_CONNECTOR_BY_NAME; + key.u.name = name; /* Find connector with name */ - if ((ret_value = H5VL__peek_connector_id_by_name(name)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_BADITER, H5I_INVALID_HID, "can't find VOL connector"); - - /* Found a connector with that name */ - if (H5I_inc_ref(ret_value, is_api) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTINC, H5I_INVALID_HID, "unable to increment ref count on VOL connector"); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL__get_connector_id_by_name() */ - -/*------------------------------------------------------------------------- - * Function: H5VL__get_connector_id_by_value - * - * Purpose: Retrieves the ID for a registered VOL connector. - * - * Return: Positive if the VOL class has been registered - * Negative on error (if the class is not a valid class or - * not registered) - * - *------------------------------------------------------------------------- - */ -hid_t -H5VL__get_connector_id_by_value(H5VL_class_value_t value, bool is_api) -{ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + if (H5VL__conn_find(&key, &connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_BADITER, NULL, "can't find VOL connector"); - FUNC_ENTER_PACKAGE + if (connector) + /* Inc. 
refcount on connector object, so it can be uniformly released */ + H5VL_conn_inc_rc(connector); - /* Find connector with value */ - if ((ret_value = H5VL__peek_connector_id_by_value(value)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_BADITER, H5I_INVALID_HID, "can't find VOL connector"); - - /* Found a connector with that value */ - if (H5I_inc_ref(ret_value, is_api) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTINC, H5I_INVALID_HID, "unable to increment ref count on VOL connector"); + /* Set return value */ + ret_value = connector; done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL__get_connector_id_by_value() */ +} /* end H5VL__get_connector_by_name() */ /*------------------------------------------------------------------------- - * Function: H5VL__peek_connector_id_by_name + * Function: H5VL__get_connector_by_value * - * Purpose: Retrieves the ID for a registered VOL connector. Does not - * increment the ref count + * Purpose: Looks up a connector by its class value. * - * Return: Positive if the VOL class has been registered - * Negative on error (if the class is not a valid class or - * not registered) + * Return: Pointer to the connector if the VOL class has been registered + * NULL on error (if the class is not a valid class or not registered) * *------------------------------------------------------------------------- */ -hid_t -H5VL__peek_connector_id_by_name(const char *name) +H5VL_connector_t * +H5VL__get_connector_by_value(H5VL_class_value_t value) { - H5VL_get_connector_ud_t op_data; /* Callback info for connector search */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_connector_t *connector = NULL; /* Connector for class */ + H5PL_vol_key_t key; /* Info for connector search */ + H5VL_connector_t *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE - /* Set up op data for iteration */ - op_data.key.kind = H5VL_GET_CONNECTOR_BY_NAME; - op_data.key.u.name = name; - op_data.found_id = H5I_INVALID_HID; + /* Set up data for find */ + key.kind = H5VL_GET_CONNECTOR_BY_VALUE; + key.u.value = value; /* Find connector with name */ - if (H5I_iterate(H5I_VOL, H5VL__get_connector_cb, &op_data, true) < 0) - HGOTO_ERROR(H5E_VOL, H5E_BADITER, H5I_INVALID_HID, "can't iterate over VOL connectors"); + if (H5VL__conn_find(&key, &connector) < 0) + HGOTO_ERROR(H5E_VOL, H5E_BADITER, NULL, "can't find VOL connector"); - /* Set return value */ - ret_value = op_data.found_id; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL__peek_connector_id_by_name() */ - -/*------------------------------------------------------------------------- - * Function: H5VL__peek_connector_id_by_value - * - * Purpose: Retrieves the ID for a registered VOL connector. 
Does not - * increment the ref count - * - * Return: Positive if the VOL class has been registered - * Negative on error (if the class is not a valid class or - * not registered) - * - *------------------------------------------------------------------------- - */ -hid_t -H5VL__peek_connector_id_by_value(H5VL_class_value_t value) -{ - H5VL_get_connector_ud_t op_data; /* Callback info for connector search */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Set up op data for iteration */ - op_data.key.kind = H5VL_GET_CONNECTOR_BY_VALUE; - op_data.key.u.value = value; - op_data.found_id = H5I_INVALID_HID; - - /* Find connector with value */ - if (H5I_iterate(H5I_VOL, H5VL__get_connector_cb, &op_data, true) < 0) - HGOTO_ERROR(H5E_VOL, H5E_BADITER, H5I_INVALID_HID, "can't iterate over VOL connectors"); + if (connector) + /* Inc. refcount on connector object, so it can be uniformly released */ + H5VL_conn_inc_rc(connector); /* Set return value */ - ret_value = op_data.found_id; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL__peek_connector_id_by_value() */ - -/*------------------------------------------------------------------------- - * Function: H5VL__connector_str_to_info - * - * Purpose: Deserializes a string into a connector's info object - * - * Return: Success: Non-negative - * Failure: Negative - * - *------------------------------------------------------------------------- - */ -herr_t -H5VL__connector_str_to_info(const char *str, hid_t connector_id, void **info) -{ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Only deserialize string, if it's non-NULL */ - if (str) { - H5VL_class_t *cls; /* VOL connector's class struct */ - - /* Check args and get class pointer */ - if (NULL == (cls = (H5VL_class_t *)H5I_object_verify(connector_id, H5I_VOL))) - HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, FAIL, "not a VOL connector ID"); - - /* Allow the connector to deserialize info */ - if (cls->info_cls.from_str) { - if ((cls->info_cls.from_str)(str, info) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTUNSERIALIZE, FAIL, "can't deserialize connector info"); - } /* end if */ - else - *info = NULL; - } /* end if */ - else - *info = NULL; + ret_value = connector; done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL__connector_str_to_info() */ +} /* end H5VL__get_connector_by_value() */ /*------------------------------------------------------------------------- * Function: H5VL__get_connector_name * - * Purpose: Private version of H5VLget_connector_name + * Purpose: Retrieve name of connector * * Return: Success: The length of the connector name - * Failure: Negative + * Failure: Can't fail * *------------------------------------------------------------------------- */ -ssize_t -H5VL__get_connector_name(hid_t id, char *name /*out*/, size_t size) +size_t +H5VL__get_connector_name(const H5VL_connector_t *connector, char *name /*out*/, size_t size) { - H5VL_object_t *vol_obj; - const H5VL_class_t *cls; - size_t len; - ssize_t ret_value = -1; - - FUNC_ENTER_PACKAGE + size_t len; - /* get the object pointer */ - if (NULL == (vol_obj = H5VL_vol_object(id))) - HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, FAIL, "invalid VOL identifier"); + FUNC_ENTER_PACKAGE_NOERR - cls = vol_obj->connector->cls; + /* Sanity check */ + assert(connector); - len = strlen(cls->name); + len = strlen(connector->cls->name); if (name) { - strncpy(name, cls->name, size); + strncpy(name, connector->cls->name, size); if (len >= size) name[size - 1] = '\0'; } /* end if */ - /* Set the return value 
for the API call */ - ret_value = (ssize_t)len; - -done: - FUNC_LEAVE_NOAPI(ret_value) + FUNC_LEAVE_NOAPI(len) } /* end H5VL__get_connector_name() */ /*------------------------------------------------------------------------- @@ -1734,28 +1696,13 @@ H5VL__get_connector_name(hid_t id, char *name /*out*/, size_t size) H5VL_object_t * H5VL_vol_object(hid_t id) { - void *obj = NULL; - H5I_type_t obj_type; H5VL_object_t *ret_value = NULL; FUNC_ENTER_NOAPI(NULL) - obj_type = H5I_get_type(id); - if (H5I_FILE == obj_type || H5I_GROUP == obj_type || H5I_ATTR == obj_type || H5I_DATASET == obj_type || - H5I_DATATYPE == obj_type || H5I_MAP == obj_type) { - /* Get the object */ - if (NULL == (obj = H5I_object(id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "invalid identifier"); - - /* If this is a datatype, get the VOL object attached to the H5T_t struct */ - if (H5I_DATATYPE == obj_type) - if (NULL == (obj = H5T_get_named_type((H5T_t *)obj))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a named datatype"); - } /* end if */ - else - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "invalid identifier type to function"); - - ret_value = (H5VL_object_t *)obj; + /* Get the underlying object */ + if (NULL == (ret_value = H5VL_vol_object_verify(id, H5I_get_type(id)))) + HGOTO_ERROR(H5E_VOL, H5E_CANTGET, NULL, "can't retrieve object for ID"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -1782,13 +1729,19 @@ H5VL_vol_object_verify(hid_t id, H5I_type_t obj_type) FUNC_ENTER_NOAPI(NULL) - if (NULL == (obj = H5I_object_verify(id, obj_type))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "identifier is not of specified type"); + if (H5I_FILE == obj_type || H5I_GROUP == obj_type || H5I_ATTR == obj_type || H5I_DATASET == obj_type || + H5I_DATATYPE == obj_type || H5I_MAP == obj_type) { + /* Get the object */ + if (NULL == (obj = H5I_object_verify(id, obj_type))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "identifier is not of specified type"); - /* If this is a datatype, get the VOL object attached to the H5T_t struct */ - if (H5I_DATATYPE == obj_type) - if (NULL == (obj = H5T_get_named_type((H5T_t *)obj))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a named datatype"); + /* If this is a datatype, get the VOL object attached to the H5T_t struct */ + if (H5I_DATATYPE == obj_type) + if (NULL == (obj = H5T_get_named_type((H5T_t *)obj))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a named datatype"); + } /* end if */ + else + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "invalid identifier type to function"); ret_value = (H5VL_object_t *)obj; @@ -1875,7 +1828,7 @@ H5VL__object(hid_t id, H5I_type_t obj_type) case H5I_ATTR: case H5I_MAP: /* get the object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(id))) + if (NULL == (vol_obj = H5I_object(id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "invalid identifier"); break; @@ -1883,7 +1836,7 @@ H5VL__object(hid_t id, H5I_type_t obj_type) H5T_t *dt = NULL; /* get the object */ - if (NULL == (dt = (H5T_t *)H5I_object(id))) + if (NULL == (dt = H5I_object(id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "invalid identifier"); /* Get the actual datatype object that should be the vol_obj */ @@ -2459,7 +2412,7 @@ H5VL_wrap_register(H5I_type_t type, void *obj, bool app_ref) * field will get clobbered later, so disallow this. 
*/ if (type == H5I_DATATYPE) - if (vol_wrap_ctx->connector->id == H5VL_NATIVE) + if (vol_wrap_ctx->connector == H5VL_NATIVE_conn_g) if (true == H5T_already_vol_managed((const H5T_t *)obj)) HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, H5I_INVALID_HID, "can't wrap an uncommitted datatype"); @@ -2468,7 +2421,7 @@ H5VL_wrap_register(H5I_type_t type, void *obj, bool app_ref) HGOTO_ERROR(H5E_VOL, H5E_CANTCREATE, H5I_INVALID_HID, "can't wrap library object"); /* Get an ID for the object */ - if ((ret_value = H5VL_register_using_vol_id(type, new_obj, vol_wrap_ctx->connector->id, app_ref)) < 0) + if ((ret_value = H5VL_register(type, new_obj, vol_wrap_ctx->connector, app_ref)) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to get an ID for the object"); done: @@ -2536,7 +2489,7 @@ H5VL_check_plugin_load(const H5VL_class_t *cls, const H5PL_key_t *key, bool *suc *------------------------------------------------------------------------- */ void -H5VL__is_default_conn(hid_t fapl_id, hid_t connector_id, bool *is_default) +H5VL__is_default_conn(hid_t fapl_id, const H5VL_connector_t *connector, bool *is_default) { FUNC_ENTER_PACKAGE_NOERR @@ -2547,8 +2500,8 @@ H5VL__is_default_conn(hid_t fapl_id, hid_t connector_id, bool *is_default) * values in the FAPL, connector ID, or the HDF5_VOL_CONNECTOR environment * variable being set. */ - *is_default = (H5VL_def_conn_s.connector_id == H5_DEFAULT_VOL) && - ((H5P_FILE_ACCESS_DEFAULT == fapl_id) || connector_id == H5_DEFAULT_VOL); + *is_default = (H5VL_def_conn_s.connector == H5_DEFAULT_VOL) && + (H5P_FILE_ACCESS_DEFAULT == fapl_id || connector == H5_DEFAULT_VOL); FUNC_LEAVE_NOAPI_VOID } /* end H5VL__is_default_conn() */ @@ -2573,7 +2526,7 @@ H5VL_setup_args(hid_t loc_id, H5I_type_t id_type, H5VL_object_t **vol_obj) assert(vol_obj); /* Get attribute pointer */ - if (NULL == (*vol_obj = (H5VL_object_t *)H5I_object_verify(loc_id, id_type))) + if (NULL == (*vol_obj = H5I_object_verify(loc_id, id_type))) HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, FAIL, "not the correct type of ID"); /* Set up collective metadata (if appropriate) */ @@ -2823,7 +2776,7 @@ H5VL_setup_token_args(hid_t loc_id, H5O_token_t *obj_token, H5VL_object_t **vol_ } /* end H5VL_setup_token_args() */ /*------------------------------------------------------------------------- - * Function: H5VL_get_cap_flags + * Function: H5VL_conn_prop_get_cap_flags * * Purpose: Query capability flags for connector property. 
* @@ -2836,7 +2789,7 @@ H5VL_setup_token_args(hid_t loc_id, H5O_token_t *obj_token, H5VL_object_t **vol_ *------------------------------------------------------------------------- */ herr_t -H5VL_get_cap_flags(const H5VL_connector_prop_t *connector_prop, uint64_t *cap_flags) +H5VL_conn_prop_get_cap_flags(const H5VL_connector_prop_t *connector_prop, uint64_t *cap_flags) { herr_t ret_value = SUCCEED; /* Return value */ @@ -2846,15 +2799,10 @@ H5VL_get_cap_flags(const H5VL_connector_prop_t *connector_prop, uint64_t *cap_fl assert(connector_prop); /* Copy the connector ID & info, if there is one */ - if (connector_prop->connector_id > 0) { - H5VL_class_t *connector; /* Pointer to connector */ - - /* Retrieve the connector for the ID */ - if (NULL == (connector = (H5VL_class_t *)H5I_object(connector_prop->connector_id))) - HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a VOL connector ID"); - + if (connector_prop->connector) { /* Query the connector's capability flags */ - if (H5VL_introspect_get_cap_flags(connector_prop->connector_info, connector, cap_flags) < 0) + if (H5VL_introspect_get_cap_flags(connector_prop->connector_info, connector_prop->connector->cls, + cap_flags) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't query connector's capability flags"); } /* end if */ else @@ -2862,4 +2810,4 @@ H5VL_get_cap_flags(const H5VL_connector_prop_t *connector_prop, uint64_t *cap_fl done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL_get_cap_flags() */ +} /* end H5VL_conn_prop_get_cap_flags() */ diff --git a/src/H5VLmodule.h b/src/H5VLmodule.h index 535027325fe..81321f6d787 100644 --- a/src/H5VLmodule.h +++ b/src/H5VLmodule.h @@ -468,7 +468,7 @@ * } H5L_info2_t; * \endcode * - *

H5Literate() and H5Lvisit() → H5Literte2() and H5Lvisit2()
+ * H5Literate() and H5Lvisit() → H5Literate2() and H5Lvisit2()

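For readers of this documentation hunk, a minimal sketch of the token-based callback form the surrounding text describes; the callback name and printf body are illustrative only (not part of the library or this patch) and assume <hdf5.h> and <stdio.h>:

    /* Example H5L_iterate2_t callback using the token-based H5L_info2_t */
    static herr_t
    link_iter_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
    {
        (void)group_id;
        (void)op_data;

        if (H5L_TYPE_HARD == info->type)
            printf("hard link '%s' (cset = %d)\n", name, (int)info->cset);

        return 0; /* zero continues the iteration */
    }

    /* e.g. passed to the versioned API:
     *   H5Literate2(gid, H5_INDEX_NAME, H5_ITER_NATIVE, NULL, link_iter_cb, NULL);
     */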
* The callback used in these API calls used the old #H5L_info_t struct, which used * addresses instead of tokens. These callbacks were versioned in the C library and * now take modified #H5L_iterate2_t callbacks which use the new token-based info diff --git a/src/H5VLnative.c b/src/H5VLnative.c index 6f6b2d0768d..3a360713ee6 100644 --- a/src/H5VLnative.c +++ b/src/H5VLnative.c @@ -39,11 +39,9 @@ #include "H5VLnative_private.h" /* Native VOL connector */ -/* The VOL connector identification number */ -static hid_t H5VL_NATIVE_ID_g = H5I_INVALID_HID; - -/* Prototypes */ -static herr_t H5VL__native_term(void); +/* The native VOL connector */ +hid_t H5VL_NATIVE_g = H5I_INVALID_HID; +H5VL_connector_t *H5VL_NATIVE_conn_g = NULL; #define H5VL_NATIVE_CAP_FLAGS \ (H5VL_CAP_FLAG_NATIVE_FILES | H5VL_CAP_FLAG_ATTR_BASIC | H5VL_CAP_FLAG_ATTR_MORE | \ @@ -66,7 +64,7 @@ static const H5VL_class_t H5VL_native_cls_g = { H5VL_NATIVE_VERSION, /* connector version */ H5VL_NATIVE_CAP_FLAGS, /* capability flags */ NULL, /* initialize */ - H5VL__native_term, /* terminate */ + NULL, /* terminate */ { /* info_cls */ (size_t)0, /* info size */ @@ -182,37 +180,42 @@ static const H5VL_class_t H5VL_native_cls_g = { }; /*------------------------------------------------------------------------- - * Function: H5VL_native_register + * Function: H5VL__native_register * - * Purpose: Register the native VOL connector and retrieve an ID for it. + * Purpose: Register the native VOL connector and set up an ID for it. * - * Return: Success: The ID for the native connector - * Failure: H5I_INVALID_HID + * Return: SUCCEED/FAIL * *------------------------------------------------------------------------- */ -hid_t -H5VL_native_register(void) +herr_t +H5VL__native_register(void) { - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(H5I_INVALID_HID) + FUNC_ENTER_PACKAGE /* Register the native VOL connector, if it isn't already */ - if (H5I_INVALID_HID == H5VL_NATIVE_ID_g) - if ((H5VL_NATIVE_ID_g = - H5VL__register_connector(&H5VL_native_cls_g, true, H5P_VOL_INITIALIZE_DEFAULT)) < 0) - HGOTO_ERROR(H5E_VOL, H5E_CANTINSERT, H5I_INVALID_HID, "can't create ID for native VOL connector"); - - /* Set return value */ - ret_value = H5VL_NATIVE_ID_g; + if (NULL == H5VL_NATIVE_conn_g) + if (NULL == + (H5VL_NATIVE_conn_g = H5VL__register_connector(&H5VL_native_cls_g, H5P_VOL_INITIALIZE_DEFAULT))) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, FAIL, "can't register native VOL connector"); + + /* Get ID for connector */ + if (H5I_VOL != H5I_get_type(H5VL_NATIVE_g)) { + if ((H5VL_NATIVE_g = H5I_register(H5I_VOL, H5VL_NATIVE_conn_g, false)) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, FAIL, "can't create ID for native VOL connector"); + + /* ID is holding a reference to the connector */ + H5VL_conn_inc_rc(H5VL_NATIVE_conn_g); + } done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5VL_native_register() */ +} /* end H5VL__native_register() */ /*--------------------------------------------------------------------------- - * Function: H5VL__native_term + * Function: H5VL__native_unregister * * Purpose: Shut down the native VOL * @@ -220,16 +223,17 @@ H5VL_native_register(void) * *--------------------------------------------------------------------------- */ -static herr_t -H5VL__native_term(void) +herr_t +H5VL__native_unregister(void) { FUNC_ENTER_PACKAGE_NOERR - /* Reset VOL ID */ - H5VL_NATIVE_ID_g = H5I_INVALID_HID; + /* Reset VOL connector info */ + H5VL_NATIVE_g = H5I_INVALID_HID; + 
H5VL_NATIVE_conn_g = NULL; FUNC_LEAVE_NOAPI(SUCCEED) -} /* end H5VL__native_term() */ +} /* end H5VL__native_unregister() */ /*--------------------------------------------------------------------------- * Function: H5VL__native_introspect_get_conn_cls diff --git a/src/H5VLnative.h b/src/H5VLnative.h index 6ca484ae329..f0a185b22d6 100644 --- a/src/H5VLnative.h +++ b/src/H5VLnative.h @@ -25,8 +25,16 @@ /* Public Macros */ /*****************/ +/* When this header is included from a private header, don't make calls to H5open() */ +#undef H5OPEN +#ifndef H5private_H +#define H5OPEN H5open(), +#else /* H5private_H */ +#define H5OPEN +#endif /* H5private_H */ + /* Identifier for the native VOL connector */ -#define H5VL_NATIVE (H5VL_native_register()) +#define H5VL_NATIVE (H5OPEN H5VL_NATIVE_g) /* Characteristics of the native VOL connector */ #define H5VL_NATIVE_NAME "native" @@ -514,6 +522,9 @@ typedef union H5VL_native_object_optional_args_t { extern "C" { #endif +/* Global variable to hold the VOL connector ID */ +H5_DLLVAR hid_t H5VL_NATIVE_g; + /* Token <--> address converters */ /** @@ -551,12 +562,6 @@ H5_DLL herr_t H5VLnative_addr_to_token(hid_t loc_id, haddr_t addr, H5O_token_t * */ H5_DLL herr_t H5VLnative_token_to_addr(hid_t loc_id, H5O_token_t token, haddr_t *addr); -/** @private - * - * \brief Register the native VOL connector and retrieve an ID for it - */ -H5_DLL hid_t H5VL_native_register(void); - #ifdef __cplusplus } #endif diff --git a/src/H5VLnative_private.h b/src/H5VLnative_private.h index c80c114a673..e3f5fb8f405 100644 --- a/src/H5VLnative_private.h +++ b/src/H5VLnative_private.h @@ -17,9 +17,12 @@ #ifndef H5VLnative_private_H #define H5VLnative_private_H +/* Include connector's public header */ +#include "H5VLnative.h" /* Native VOL connector */ + /* Private headers needed by this file */ -#include "H5Fprivate.h" /* Files */ -#include "H5VLnative.h" /* Native VOL connector */ +#include "H5Fprivate.h" /* Files */ +#include "H5VLprivate.h" /* Virtual Object Layer */ /**************************/ /* Library Private Macros */ @@ -33,14 +36,13 @@ /* Library Private Variables */ /*****************************/ +/* The native VOL connector */ +H5_DLLVAR H5VL_connector_t *H5VL_NATIVE_conn_g; + /******************************/ /* Library Private Prototypes */ /******************************/ -#ifdef __cplusplus -extern "C" { -#endif - /* Attribute callbacks */ H5_DLL void *H5VL__native_attr_create(void *obj, const H5VL_loc_params_t *loc_params, const char *attr_name, hid_t type_id, hid_t space_id, hid_t acpl_id, hid_t aapl_id, @@ -162,8 +164,4 @@ H5_DLL herr_t H5VL_native_addr_to_token(void *obj, H5I_type_t obj_type, haddr_t H5_DLL herr_t H5VL_native_token_to_addr(void *obj, H5I_type_t obj_type, H5O_token_t token, haddr_t *addr); H5_DLL herr_t H5VL_native_get_file_struct(void *obj, H5I_type_t type, H5F_t **file); -#ifdef __cplusplus -} -#endif - #endif /* H5VLnative_private_H */ diff --git a/src/H5VLpassthru.c b/src/H5VLpassthru.c index df13afc2513..8c1de0e6d96 100644 --- a/src/H5VLpassthru.c +++ b/src/H5VLpassthru.c @@ -242,7 +242,7 @@ static herr_t H5VL_pass_through_optional(void *obj, H5VL_optional_args_t *args, /*******************/ /* Pass through VOL connector class struct */ -static const H5VL_class_t H5VL_pass_through_g = { +const H5VL_class_t H5VL_pass_through_g = { H5VL_VERSION, /* VOL class struct version */ (H5VL_class_value_t)H5VL_PASSTHRU_VALUE, /* value */ H5VL_PASSTHRU_NAME, /* name */ @@ -364,9 +364,6 @@ static const H5VL_class_t H5VL_pass_through_g = { 
H5VL_pass_through_optional /* optional */ }; -/* The connector identification number, initialized at runtime */ -static hid_t H5VL_PASSTHRU_g = H5I_INVALID_HID; - /*------------------------------------------------------------------------- * Function: H5VL__pass_through_new_obj * @@ -420,27 +417,6 @@ H5VL_pass_through_free_obj(H5VL_pass_through_t *obj) return 0; } /* end H5VL__pass_through_free_obj() */ -/*------------------------------------------------------------------------- - * Function: H5VL_pass_through_register - * - * Purpose: Register the pass-through VOL connector and retrieve an ID - * for it. - * - * Return: Success: The ID for the pass-through VOL connector - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -hid_t -H5VL_pass_through_register(void) -{ - /* Singleton register the pass-through VOL connector ID */ - if (H5VL_PASSTHRU_g < 0) - H5VL_PASSTHRU_g = H5VLregister_connector(&H5VL_pass_through_g, H5P_DEFAULT); - - return H5VL_PASSTHRU_g; -} /* end H5VL_pass_through_register() */ - /*------------------------------------------------------------------------- * Function: H5VL_pass_through_init * @@ -486,9 +462,6 @@ H5VL_pass_through_term(void) printf("------- PASS THROUGH VOL TERM\n"); #endif - /* Reset VOL ID */ - H5VL_PASSTHRU_g = H5I_INVALID_HID; - return 0; } /* end H5VL_pass_through_term() */ diff --git a/src/H5VLpassthru.h b/src/H5VLpassthru.h index ec396cc8eb0..8480081cde6 100644 --- a/src/H5VLpassthru.h +++ b/src/H5VLpassthru.h @@ -20,8 +20,16 @@ /* Public headers needed by this file */ #include "H5VLpublic.h" /* Virtual Object Layer */ +/* When this header is included from a private header, don't make calls to H5open() */ +#undef H5OPEN +#ifndef H5private_H +#define H5OPEN H5open(), +#else /* H5private_H */ +#define H5OPEN +#endif /* H5private_H */ + /* Identifier for the pass-through VOL connector */ -#define H5VL_PASSTHRU (H5VL_pass_through_register()) +#define H5VL_PASSTHRU (H5OPEN H5VL_PASSTHRU_g) /* Characteristics of the pass-through VOL connector */ #define H5VL_PASSTHRU_NAME "pass_through" @@ -38,7 +46,8 @@ typedef struct H5VL_pass_through_info_t { extern "C" { #endif -H5_DLL hid_t H5VL_pass_through_register(void); +/* Global variable to hold the VOL connector ID */ +H5_DLLVAR hid_t H5VL_PASSTHRU_g; #ifdef __cplusplus } diff --git a/src/H5VLpassthru_int.c b/src/H5VLpassthru_int.c new file mode 100644 index 00000000000..b514101b8c4 --- /dev/null +++ b/src/H5VLpassthru_int.c @@ -0,0 +1,97 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: Private routines for the internal passthru VOL connector. + * + * Necessary for using internal library routines, which are + * disallowed within the actual passthru VOL connector code. 
+ * + */ + +/****************/ +/* Module Setup */ +/****************/ + +#define H5VL_FRIEND /* Suppress error about including H5VLpkg */ + +/***********/ +/* Headers */ +/***********/ + +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Iprivate.h" /* IDs */ +#include "H5Pprivate.h" /* Property lists */ +#include "H5VLpkg.h" /* Virtual Object Layer */ + +#include "H5VLpassthru_private.h" /* Passthru VOL connector */ + +/* The native passthru VOL connector */ +hid_t H5VL_PASSTHRU_g = H5I_INVALID_HID; +H5VL_connector_t *H5VL_PASSTHRU_conn_g = NULL; + +/*------------------------------------------------------------------------- + * Function: H5VL__passthru_register + * + * Purpose: Register the passthru VOL connector and set up an ID for it. + * + * Return: SUCCEED/FAIL + * + *------------------------------------------------------------------------- + */ +herr_t +H5VL__passthru_register(void) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Register the passthru VOL connector, if it isn't already */ + if (NULL == H5VL_PASSTHRU_conn_g) + if (NULL == (H5VL_PASSTHRU_conn_g = + H5VL__register_connector(&H5VL_pass_through_g, H5P_VOL_INITIALIZE_DEFAULT))) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, FAIL, "can't register passthru VOL connector"); + + /* Get ID for connector */ + if (H5I_VOL != H5I_get_type(H5VL_PASSTHRU_g)) { + if ((H5VL_PASSTHRU_g = H5I_register(H5I_VOL, H5VL_PASSTHRU_conn_g, false)) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, FAIL, "can't create ID for passthru VOL connector"); + + /* ID is holding a reference to the connector */ + H5VL_conn_inc_rc(H5VL_PASSTHRU_conn_g); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5VL__passthru_register() */ + +/*--------------------------------------------------------------------------- + * Function: H5VL__passthru_unregister + * + * Purpose: Shut down the passthru VOL + * + * Returns: SUCCEED (Can't fail) + * + *--------------------------------------------------------------------------- + */ +herr_t +H5VL__passthru_unregister(void) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* Reset VOL connector info */ + H5VL_PASSTHRU_g = H5I_INVALID_HID; + H5VL_PASSTHRU_conn_g = NULL; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5VL__passthru_unregister() */ diff --git a/src/H5VLpassthru_private.h b/src/H5VLpassthru_private.h new file mode 100644 index 00000000000..8ac340937c2 --- /dev/null +++ b/src/H5VLpassthru_private.h @@ -0,0 +1,48 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: The private header file for the passthru VOL connector. 
+ */ + +#ifndef H5VLpassthru_private_H +#define H5VLpassthru_private_H + +/* Include connector's public header */ +#include "H5VLpassthru.h" /* Passthru VOL connector */ + +/* Private headers needed by this file */ +#include "H5VLprivate.h" /* Virtual Object Layer */ + +/**************************/ +/* Library Private Macros */ +/**************************/ + +/****************************/ +/* Library Private Typedefs */ +/****************************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/* Passthru VOL connector's class struct */ +H5_DLLVAR const H5VL_class_t H5VL_pass_through_g; + +/* The native VOL connector */ +H5_DLLVAR H5VL_connector_t *H5VL_PASSTHRU_conn_g; + +/******************************/ +/* Library Private Prototypes */ +/******************************/ + +#endif /* H5VLpassthru_private_H */ diff --git a/src/H5VLpkg.h b/src/H5VLpkg.h index dae44eaf5d1..f33406e7602 100644 --- a/src/H5VLpkg.h +++ b/src/H5VLpkg.h @@ -36,6 +36,21 @@ /* Package Private Typedefs */ /****************************/ +/* Internal struct to track VOL connectors */ +struct H5VL_connector_t { + H5VL_class_t *cls; /* Pointer to connector class struct */ + int64_t nrefs; /* Number of references to this struct */ + struct H5VL_connector_t *next, *prev; /* Pointers to the next & previous */ + /* connectors in global list of active connectors */ +}; + +/* Internal vol object structure returned to the API */ +struct H5VL_object_t { + void *data; /* Pointer to connector-managed data for this object */ + H5VL_connector_t *connector; /* Pointer to VOL connector used by this object */ + size_t rc; /* Reference count */ +}; + /*****************************/ /* Package Private Variables */ /*****************************/ @@ -43,30 +58,35 @@ /******************************/ /* Package Private Prototypes */ /******************************/ -H5_DLL herr_t H5VL__set_def_conn(void); -H5_DLL hid_t H5VL__register_connector(const void *cls, bool app_ref, hid_t vipl_id); -H5_DLL hid_t H5VL__register_connector_by_class(const H5VL_class_t *cls, bool app_ref, hid_t vipl_id); -H5_DLL hid_t H5VL__register_connector_by_name(const char *name, bool app_ref, hid_t vipl_id); -H5_DLL hid_t H5VL__register_connector_by_value(H5VL_class_value_t value, bool app_ref, hid_t vipl_id); -H5_DLL htri_t H5VL__is_connector_registered_by_name(const char *name); -H5_DLL htri_t H5VL__is_connector_registered_by_value(H5VL_class_value_t value); -H5_DLL hid_t H5VL__get_connector_id(hid_t obj_id, bool is_api); -H5_DLL hid_t H5VL__get_connector_id_by_name(const char *name, bool is_api); -H5_DLL hid_t H5VL__get_connector_id_by_value(H5VL_class_value_t value, bool is_api); -H5_DLL hid_t H5VL__peek_connector_id_by_name(const char *name); -H5_DLL hid_t H5VL__peek_connector_id_by_value(H5VL_class_value_t value); -H5_DLL herr_t H5VL__connector_str_to_info(const char *str, hid_t connector_id, void **info); -H5_DLL ssize_t H5VL__get_connector_name(hid_t id, char *name /*out*/, size_t size); -H5_DLL void H5VL__is_default_conn(hid_t fapl_id, hid_t connector_id, bool *is_default); -H5_DLL herr_t H5VL__register_opt_operation(H5VL_subclass_t subcls, const char *op_name, int *op_val); -H5_DLL size_t H5VL__num_opt_operation(void); -H5_DLL herr_t H5VL__find_opt_operation(H5VL_subclass_t subcls, const char *op_name, int *op_val); -H5_DLL herr_t H5VL__unregister_opt_operation(H5VL_subclass_t subcls, const char *op_name); -H5_DLL herr_t H5VL__term_opt_operation(void); +H5_DLL herr_t H5VL__set_def_conn(void); 
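Illustrative sketch (editorial, not one of the diff hunks): the by-name / by-value lookups introduced earlier in this patch return an H5VL_connector_t with its refcount already incremented, so internal callers are expected to pair them with H5VL_conn_dec_rc(). The helper below is hypothetical and only shows that pattern, using the library's usual FUNC_ENTER / HGOTO_ERROR scaffolding:

    /* Hypothetical internal helper showing connector lookup + release */
    static herr_t
    example_use_connector(void)
    {
        H5VL_connector_t *connector = NULL;
        herr_t            ret_value = SUCCEED;

        FUNC_ENTER_PACKAGE

        /* Lookup increments the connector's refcount */
        if (NULL == (connector = H5VL__get_connector_by_name("native")))
            HGOTO_ERROR(H5E_VOL, H5E_CANTGET, FAIL, "can't find VOL connector");

        /* ... use the connector here, e.g. register an object against it ... */

    done:
        /* Release the reference taken by the lookup */
        if (connector && H5VL_conn_dec_rc(connector) < 0)
            HDONE_ERROR(H5E_VOL, H5E_CANTDEC, FAIL, "can't decrement connector refcount");

        FUNC_LEAVE_NOAPI(ret_value)
    }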
+H5_DLL H5VL_connector_t *H5VL__register_connector(const H5VL_class_t *cls, hid_t vipl_id); +H5_DLL H5VL_connector_t *H5VL__register_connector_by_class(const H5VL_class_t *cls, hid_t vipl_id); +H5_DLL H5VL_connector_t *H5VL__register_connector_by_name(const char *name, hid_t vipl_id); +H5_DLL H5VL_connector_t *H5VL__register_connector_by_value(H5VL_class_value_t value, hid_t vipl_id); +H5_DLL htri_t H5VL__is_connector_registered_by_name(const char *name); +H5_DLL htri_t H5VL__is_connector_registered_by_value(H5VL_class_value_t value); +H5_DLL H5VL_connector_t *H5VL__get_connector_by_name(const char *name); +H5_DLL H5VL_connector_t *H5VL__get_connector_by_value(H5VL_class_value_t value); +H5_DLL herr_t H5VL__connector_str_to_info(const char *str, H5VL_connector_t *connector, void **info); +H5_DLL size_t H5VL__get_connector_name(const H5VL_connector_t *connector, char *name /*out*/, size_t size); +H5_DLL void H5VL__is_default_conn(hid_t fapl_id, const H5VL_connector_t *connector, bool *is_default); +H5_DLL herr_t H5VL__register_opt_operation(H5VL_subclass_t subcls, const char *op_name, int *op_val); +H5_DLL size_t H5VL__num_opt_operation(void); +H5_DLL herr_t H5VL__find_opt_operation(H5VL_subclass_t subcls, const char *op_name, int *op_val); +H5_DLL herr_t H5VL__unregister_opt_operation(H5VL_subclass_t subcls, const char *op_name); +H5_DLL herr_t H5VL__term_opt_operation(void); + +/* Register the internal VOL connectors */ +H5_DLL herr_t H5VL__native_register(void); +H5_DLL herr_t H5VL__native_unregister(void); +H5_DLL herr_t H5VL__passthru_register(void); +H5_DLL herr_t H5VL__passthru_unregister(void); /* Testing functions */ #ifdef H5VL_TESTING H5_DLL herr_t H5VL__reparse_def_vol_conn_variable_test(void); +H5_DLL htri_t H5VL__is_native_connector_test(hid_t vol_id); +H5_DLL hid_t H5VL__register_using_vol_id_test(H5I_type_t type, void *object, hid_t vol_id); #endif /* H5VL_TESTING */ #endif /* H5VLpkg_H */ diff --git a/src/H5VLprivate.h b/src/H5VLprivate.h index 2904a5b1c15..635642969bb 100644 --- a/src/H5VLprivate.h +++ b/src/H5VLprivate.h @@ -27,28 +27,33 @@ /* Library Private Macros */ /**************************/ +/* If the module using this macro is allowed access to the private variables, access them directly */ +#ifdef H5VL_MODULE +#define H5VL_OBJ_RC(VOL_OBJ) ((VOL_OBJ)->rc) +#define H5VL_OBJ_CONNECTOR(VOL_OBJ) ((VOL_OBJ)->connector) +#define H5VL_OBJ_DATA(VOL_OBJ) ((VOL_OBJ)->data) +#define H5VL_OBJ_DATA_RESET(VOL_OBJ) ((VOL_OBJ)->data = NULL) +#else /* H5VL_MODULE */ +#define H5VL_OBJ_RC(VOL_OBJ) (H5VL_obj_get_rc(VOL_OBJ)) +#define H5VL_OBJ_CONNECTOR(VOL_OBJ) (H5VL_obj_get_connector(VOL_OBJ)) +#define H5VL_OBJ_DATA(VOL_OBJ) (H5VL_obj_get_data(VOL_OBJ)) +#define H5VL_OBJ_DATA_RESET(VOL_OBJ) (H5VL_obj_reset_data(VOL_OBJ)) +#endif /* H5VL_MODULE */ + /****************************/ /* Library Private Typedefs */ /****************************/ -/* Internal struct to track VOL connector information for objects */ -typedef struct H5VL_t { - const H5VL_class_t *cls; /* Pointer to connector class struct */ - int64_t nrefs; /* Number of references by objects using this struct */ - hid_t id; /* Identifier for the VOL connector */ -} H5VL_t; - -/* Internal vol object structure returned to the API */ -typedef struct H5VL_object_t { - void *data; /* Pointer to connector-managed data for this object */ - H5VL_t *connector; /* Pointer to VOL connector struct */ - size_t rc; /* Reference count */ -} H5VL_object_t; - -/* Internal structure to hold the connector ID & info for FAPLs */ +/* Typedef for 
VOL connector (defined in H5VLpkg.h) */ +typedef struct H5VL_connector_t H5VL_connector_t; + +/* Typedef for VOL object (defined in H5VLpkg.h) */ +typedef struct H5VL_object_t H5VL_object_t; + +/* Property for the connector ID & info in FAPLs */ typedef struct H5VL_connector_prop_t { - hid_t connector_id; /* VOL connector's ID */ - const void *connector_info; /* VOL connector info, for open callbacks */ + H5VL_connector_t *connector; /* VOL connector */ + const void *connector_info; /* VOL connector info, for open callbacks */ } H5VL_connector_prop_t; /* Which kind of VOL connector field to use for searching */ @@ -66,15 +71,22 @@ typedef enum H5VL_get_connector_kind_t { /******************************/ /* Utility functions */ -H5_DLL herr_t H5VL_init_phase1(void); -H5_DLL herr_t H5VL_init_phase2(void); -H5_DLL H5VL_t *H5VL_new_connector(hid_t connector_id); -H5_DLL herr_t H5VL_cmp_connector_cls(int *cmp_value, const H5VL_class_t *cls1, const H5VL_class_t *cls2); -H5_DLL herr_t H5VL_conn_copy(H5VL_connector_prop_t *value); -H5_DLL int64_t H5VL_conn_inc_rc(H5VL_t *connector); -H5_DLL int64_t H5VL_conn_dec_rc(H5VL_t *connector); -H5_DLL herr_t H5VL_conn_free(const H5VL_connector_prop_t *info); -H5_DLL herr_t H5VL_get_cap_flags(const H5VL_connector_prop_t *prop, uint64_t *cap_flags); +H5_DLL herr_t H5VL_init_phase1(void); +H5_DLL herr_t H5VL_init_phase2(void); +H5_DLL herr_t H5VL_cmp_connector_cls(int *cmp_value, const H5VL_class_t *cls1, const H5VL_class_t *cls2); + +/* Connector routines */ +H5_DLL hid_t H5VL_conn_register(H5VL_connector_t *connector); +H5_DLL int64_t H5VL_conn_inc_rc(H5VL_connector_t *connector); +H5_DLL int64_t H5VL_conn_dec_rc(H5VL_connector_t *connector); +H5_DLL htri_t H5VL_conn_same_class(const H5VL_connector_t *connector1, const H5VL_connector_t *conn2); + +/* Connector property routines */ +H5_DLL herr_t H5VL_conn_prop_copy(H5VL_connector_prop_t *value); +H5_DLL herr_t H5VL_conn_prop_cmp(int *cmp_value, const H5VL_connector_prop_t *prop1, + const H5VL_connector_prop_t *prop2); +H5_DLL herr_t H5VL_conn_prop_free(const H5VL_connector_prop_t *info); +H5_DLL herr_t H5VL_conn_prop_get_cap_flags(const H5VL_connector_prop_t *prop, uint64_t *cap_flags); /* Functions that deal with VOL connectors */ union H5PL_key_t; @@ -97,8 +109,9 @@ H5_DLL void *H5VL_object_unwrap(const H5VL_object_t *vol_obj); H5_DLL void *H5VL_object_verify(hid_t id, H5I_type_t obj_type); H5_DLL H5VL_object_t *H5VL_vol_object(hid_t id); H5_DLL H5VL_object_t *H5VL_vol_object_verify(hid_t id, H5I_type_t obj_type); -H5_DLL H5VL_object_t *H5VL_create_object(void *object, H5VL_t *vol_connector); -H5_DLL H5VL_object_t *H5VL_create_object_using_vol_id(H5I_type_t type, void *obj, hid_t connector_id); +H5_DLL H5VL_object_t *H5VL_create_object(void *object, H5VL_connector_t *vol_connector); +H5_DLL H5VL_object_t *H5VL_new_vol_obj(H5I_type_t type, void *object, H5VL_connector_t *vol_connector, + bool wrap_obj); H5_DLL hsize_t H5VL_object_inc_rc(H5VL_object_t *obj); H5_DLL herr_t H5VL_free_object(H5VL_object_t *obj); H5_DLL herr_t H5VL_object_is_native(const H5VL_object_t *obj, bool *is_native); @@ -106,15 +119,19 @@ H5_DLL herr_t H5VL_file_is_same(const H5VL_object_t *vol_obj1, const H5V bool *same_file); /* Functions that wrap / unwrap VOL objects */ -H5_DLL herr_t H5VL_get_wrap_ctx(const H5VL_class_t *connector, void *obj, void **wrap_ctx); H5_DLL void *H5VL_wrap_object(const H5VL_class_t *connector, void *wrap_ctx, void *obj, H5I_type_t obj_type); H5_DLL void *H5VL_unwrap_object(const H5VL_class_t *connector, 
void *obj); -H5_DLL herr_t H5VL_free_wrap_ctx(const H5VL_class_t *connector, void *wrap_ctx); H5_DLL herr_t H5VL_set_vol_wrapper(const H5VL_object_t *vol_obj); H5_DLL herr_t H5VL_inc_vol_wrapper(void *vol_wrap_ctx); H5_DLL herr_t H5VL_dec_vol_wrapper(void *vol_wrap_ctx); H5_DLL herr_t H5VL_reset_vol_wrapper(void); +/* Functions that retrieve values from VOL types */ +H5_DLL size_t H5VL_obj_get_rc(const H5VL_object_t *vol_obj); +H5_DLL H5VL_connector_t *H5VL_obj_get_connector(const H5VL_object_t *vol_obj); +H5_DLL void *H5VL_obj_get_data(const H5VL_object_t *vol_obj); +H5_DLL void H5VL_obj_reset_data(H5VL_object_t *vol_obj); + /* Library state functions */ H5_DLL herr_t H5VL_retrieve_lib_state(void **state); H5_DLL herr_t H5VL_start_lib_state(void); @@ -123,10 +140,9 @@ H5_DLL herr_t H5VL_finish_lib_state(void); H5_DLL herr_t H5VL_free_lib_state(void *state); /* ID registration functions */ -H5_DLL hid_t H5VL_register(H5I_type_t type, void *object, H5VL_t *vol_connector, bool app_ref); +H5_DLL hid_t H5VL_register(H5I_type_t type, void *object, H5VL_connector_t *vol_connector, bool app_ref); H5_DLL hid_t H5VL_wrap_register(H5I_type_t type, void *obj, bool app_ref); -H5_DLL hid_t H5VL_register_using_vol_id(H5I_type_t type, void *obj, hid_t connector_id, bool app_ref); -H5_DLL herr_t H5VL_register_using_existing_id(H5I_type_t type, void *object, H5VL_t *vol_connector, +H5_DLL herr_t H5VL_register_using_existing_id(H5I_type_t type, void *object, H5VL_connector_t *vol_connector, bool app_ref, hid_t existing_id); /* Object access functions */ @@ -149,10 +165,10 @@ H5_DLL herr_t H5VL_setup_token_args(hid_t loc_id, H5O_token_t *obj_token, H5VL_o *********************************/ /* Connector "management" functions */ -H5_DLL int H5VL_copy_connector_info(const H5VL_class_t *connector, void **dst_info, const void *src_info); -H5_DLL herr_t H5VL_cmp_connector_info(const H5VL_class_t *connector, int *cmp_value, const void *info1, +H5_DLL int H5VL_copy_connector_info(const H5VL_connector_t *connector, void **dst_info, const void *src_info); +H5_DLL herr_t H5VL_cmp_connector_info(const H5VL_connector_t *connector, int *cmp_value, const void *info1, const void *info2); -H5_DLL herr_t H5VL_free_connector_info(hid_t connector_id, const void *info); +H5_DLL herr_t H5VL_free_connector_info(const H5VL_connector_t *connector, const void *info); /* Attribute functions */ H5_DLL void *H5VL_attr_create(const H5VL_object_t *vol_obj, const H5VL_loc_params_t *loc_params, @@ -178,10 +194,10 @@ H5_DLL void *H5VL_dataset_create(const H5VL_object_t *vol_obj, const H5VL_loc_p hid_t dcpl_id, hid_t dapl_id, hid_t dxpl_id, void **req); H5_DLL void *H5VL_dataset_open(const H5VL_object_t *vol_obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t dapl_id, hid_t dxpl_id, void **req); -H5_DLL herr_t H5VL_dataset_read(size_t count, void *obj[], H5VL_t *connector, hid_t mem_type_id[], +H5_DLL herr_t H5VL_dataset_read(size_t count, void *obj[], H5VL_connector_t *connector, hid_t mem_type_id[], hid_t mem_space_id[], hid_t file_space_id[], hid_t dxpl_id, void *buf[], void **req); -H5_DLL herr_t H5VL_dataset_write(size_t count, void *obj[], H5VL_t *connector, hid_t mem_type_id[], +H5_DLL herr_t H5VL_dataset_write(size_t count, void *obj[], H5VL_connector_t *connector, hid_t mem_type_id[], hid_t mem_space_id[], hid_t file_space_id[], hid_t dxpl_id, const void *buf[], void **req); H5_DLL herr_t H5VL_dataset_get(const H5VL_object_t *vol_obj, H5VL_dataset_get_args_t *args, hid_t dxpl_id, @@ -209,10 +225,10 @@ H5_DLL herr_t 
H5VL_datatype_optional_op(H5VL_object_t *vol_obj, H5VL_optional_ar H5_DLL herr_t H5VL_datatype_close(const H5VL_object_t *vol_obj, hid_t dxpl_id, void **req); /* File functions */ -H5_DLL void *H5VL_file_create(const H5VL_connector_prop_t *connector_prop, const char *name, unsigned flags, +H5_DLL void *H5VL_file_create(const H5VL_connector_t *connector, const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, hid_t dxpl_id, void **req); -H5_DLL void *H5VL_file_open(H5VL_connector_prop_t *connector_prop, const char *name, unsigned flags, - hid_t fapl_id, hid_t dxpl_id, void **req); +H5_DLL void *H5VL_file_open(H5VL_connector_t *connector, const char *name, unsigned flags, hid_t fapl_id, + hid_t dxpl_id, void **req); H5_DLL herr_t H5VL_file_get(const H5VL_object_t *vol_obj, H5VL_file_get_args_t *args, hid_t dxpl_id, void **req); H5_DLL herr_t H5VL_file_specific(const H5VL_object_t *vol_obj, H5VL_file_specific_args_t *args, hid_t dxpl_id, diff --git a/src/H5VLquery.c b/src/H5VLquery.c new file mode 100644 index 00000000000..47fd0d21a2c --- /dev/null +++ b/src/H5VLquery.c @@ -0,0 +1,149 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: The Virtual Object Layer as described in documentation. + * The purpose is to provide an abstraction on how to access the + * underlying HDF5 container, whether in a local file with + * a specific file format, or remotely on other machines, etc... + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5VLmodule.h" /* This source code file is part of the H5VL module */ + +/***********/ +/* Headers */ +/***********/ + +#include "H5private.h" /* Generic Functions */ +#include "H5VLpkg.h" /* Virtual Object Layer */ + +/****************/ +/* Local Macros */ +/****************/ + +/******************/ +/* Local Typedefs */ +/******************/ + +/********************/ +/* Package Typedefs */ +/********************/ + +/********************/ +/* Local Prototypes */ +/********************/ + +/*********************/ +/* Package Variables */ +/*********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*******************/ +/* Local Variables */ +/*******************/ + +/*------------------------------------------------------------------------- + * Function: Retrieve the refcount for a VOL object + * + * Purpose: Quick and dirty routine to retrieve the VOL object's refcount. 
+ * (Mainly added to stop non-file routines from poking about in the + * H5VL_object_t data structure) + * + * Return: Refcount on success/abort on failure (shouldn't fail) + * + *------------------------------------------------------------------------- + */ +size_t +H5VL_obj_get_rc(const H5VL_object_t *vol_obj) +{ + /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + assert(vol_obj); + + FUNC_LEAVE_NOAPI(vol_obj->rc) +} /* end H5VL_obj_get_rc() */ + +/*------------------------------------------------------------------------- + * Function: Retrieve the connector for a VOL object + * + * Purpose: Quick and dirty routine to retrieve the VOL object's connector. + * (Mainly added to stop non-file routines from poking about in the + * H5VL_object_t data structure) + * + * Return: Pointer to connector on success/abort on failure (shouldn't fail) + * + *------------------------------------------------------------------------- + */ +H5VL_connector_t * +H5VL_obj_get_connector(const H5VL_object_t *vol_obj) +{ + /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + assert(vol_obj); + + FUNC_LEAVE_NOAPI(vol_obj->connector) +} /* end H5VL_obj_get_connector() */ + +/*------------------------------------------------------------------------- + * Function: Retrieve the data for a VOL object + * + * Purpose: Quick and dirty routine to retrieve the VOL object's data. + * (Mainly added to stop non-file routines from poking about in the + * H5VL_object_t data structure) + * + * Return: Pointer to data on success/abort on failure (shouldn't fail) + * + *------------------------------------------------------------------------- + */ +void * +H5VL_obj_get_data(const H5VL_object_t *vol_obj) +{ + /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + assert(vol_obj); + + FUNC_LEAVE_NOAPI(vol_obj->data) +} /* end H5VL_obj_get_data() */ + +/*------------------------------------------------------------------------- + * Function: Resetthe data for a VOL object + * + * Purpose: Quick and dirty routine to reset the VOL object's data. 
+ * (Mainly added to stop non-file routines from poking about in the + * H5VL_object_t data structure) + * + * Return: none + * + *------------------------------------------------------------------------- + */ +void +H5VL_obj_reset_data(H5VL_object_t *vol_obj) +{ + /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + assert(vol_obj); + + vol_obj->data = NULL; + + FUNC_LEAVE_NOAPI_VOID +} /* end H5VL_obj_reset_data() */ diff --git a/src/H5VLtest.c b/src/H5VLtest.c index 17368b77e8c..2c50a579e3e 100644 --- a/src/H5VLtest.c +++ b/src/H5VLtest.c @@ -31,8 +31,12 @@ /***********/ #include "H5private.h" /* Generic Functions */ #include "H5Eprivate.h" /* Error handling */ +#include "H5Iprivate.h" /* IDs */ #include "H5VLpkg.h" /* Virtual Object Layer */ +/* VOL connectors */ +#include "H5VLnative_private.h" /* Native VOL connector */ + /****************/ /* Local Macros */ /****************/ @@ -89,3 +93,65 @@ H5VL__reparse_def_vol_conn_variable_test(void) done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5VL__reparse_def_vol_conn_variable_test() */ + +/*------------------------------------------------------------------------- + * Function: H5VL__is_native_connector_test + * + * Purpose: Check if connector is the native connector + * + * Return: TRUE/FALSE/FAIL + * + *------------------------------------------------------------------------- + */ +htri_t +H5VL__is_native_connector_test(hid_t vol_id) +{ + H5VL_connector_t *native, *connector; + int cmp_value; /* Comparison result */ + htri_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments */ + if (NULL == (connector = H5I_object_verify(vol_id, H5I_VOL))) + HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, FAIL, "not a VOL connector ID"); + + /* For the time being, we disallow unregistering the native VOL connector */ + native = H5VL_NATIVE_conn_g; + if (H5VL_cmp_connector_cls(&cmp_value, connector->cls, native->cls) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTCOMPARE, FAIL, "can't compare connector classes"); + ret_value = (0 == cmp_value); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5VL__is_native_connector_test() */ + +/*------------------------------------------------------------------------- + * Function: H5VL__register_using_vol_id_test + * + * Purpose: Test infra wrapper around H5VL_register + * + * Return: Success: A valid HDF5 ID + * Failure: H5I_INVALID_HID + * + *------------------------------------------------------------------------- + */ +hid_t +H5VL__register_using_vol_id_test(H5I_type_t type, void *object, hid_t vol_id) +{ + H5VL_connector_t *connector; + hid_t ret_value = H5I_INVALID_HID; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments */ + if (NULL == (connector = H5I_object_verify(vol_id, H5I_VOL))) + HGOTO_ERROR(H5E_VOL, H5E_BADTYPE, H5I_INVALID_HID, "not a VOL connector ID"); + + /* Get an ID for the object */ + if ((ret_value = H5VL_register(type, object, connector, true)) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to get an ID for the object"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5VL__register_using_vol_id_test() */ diff --git a/src/H5VMprivate.h b/src/H5VMprivate.h index d4e427306db..456413fae9a 100644 --- a/src/H5VMprivate.h +++ b/src/H5VMprivate.h @@ -322,17 +322,14 @@ H5VM_vector_inc(int n, hsize_t *v1, const hsize_t *v2) /* Lookup table for general log2(n) routine */ static const unsigned char LogTable256[] = { - /* clang-clang-format off */ - 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 - /* clang-clang-format on */ -}; + 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; /*------------------------------------------------------------------------- * Function: H5VM_log2_gen diff --git a/src/H5build_settings.autotools.c.in b/src/H5build_settings.autotools.c.in index 5ca1555a157..67bd023b961 100644 --- a/src/H5build_settings.autotools.c.in +++ b/src/H5build_settings.autotools.c.in @@ -15,7 +15,6 @@ H5_GCC_DIAG_OFF("larger-than=") H5_CLANG_DIAG_OFF("overlength-strings") -/* clang-format off */ const char H5build_settings[]= " SUMMARY OF THE HDF5 CONFIGURATION\n" " =================================\n" @@ -114,7 +113,6 @@ const char H5build_settings[]= " Strict file format checks: @STRICT_FORMAT_CHECKS@\n" " Optimization instrumentation: @INSTRUMENT_LIBRARY@\n" ; -/* clang-format on */ H5_GCC_DIAG_ON("larger-than=") H5_CLANG_DIAG_OFF("overlength-strings") diff --git a/src/H5encode.h b/src/H5encode.h index 690aee104d7..d88757c4b7e 100644 --- a/src/H5encode.h +++ b/src/H5encode.h @@ -33,7 +33,7 @@ #define INT16ENCODE(p, i) \ do { \ - *(p) = (uint8_t)((unsigned)(i)&0xff); \ + *(p) = (uint8_t)((unsigned)(i) & 0xff); \ (p)++; \ *(p) = (uint8_t)(((unsigned)(i) >> 8) & 0xff); \ (p)++; \ @@ -41,7 +41,7 @@ #define UINT16ENCODE(p, i) \ do { \ - *(p) = (uint8_t)((unsigned)(i)&0xff); \ + *(p) = (uint8_t)((unsigned)(i) & 0xff); \ (p)++; \ *(p) = (uint8_t)(((unsigned)(i) >> 8) & 0xff); \ (p)++; \ @@ -49,7 +49,7 @@ #define INT32ENCODE(p, i) \ do { \ - *(p) = (uint8_t)((uint32_t)(i)&0xff); \ + *(p) = (uint8_t)((uint32_t)(i) & 0xff); \ (p)++; \ *(p) = (uint8_t)(((uint32_t)(i) >> 8) & 0xff); \ (p)++; \ @@ -61,7 +61,7 @@ #define UINT32ENCODE(p, i) \ do { \ - *(p) = (uint8_t)((i)&0xff); \ + *(p) = (uint8_t)((i) & 0xff); \ (p)++; \ *(p) = (uint8_t)(((i) >> 8) & 0xff); \ (p)++; \ @@ -149,41 +149,41 @@ #define INT16DECODE(p, i) \ do { \ - (i) = (int16_t)((*(p)&0xff)); \ + (i) = (int16_t)((*(p) & 0xff)); \ (p)++; \ - (i) |= (int16_t)(((*(p)&0xff) << 8) | ((*(p)&0x80) ? ~0xffff : 0x0)); \ + (i) |= (int16_t)(((*(p) & 0xff) << 8) | ((*(p) & 0x80) ? 
~0xffff : 0x0)); \ (p)++; \ } while (0) #define UINT16DECODE(p, i) \ do { \ - (i) = (uint16_t)(*(p)&0xff); \ + (i) = (uint16_t)(*(p) & 0xff); \ (p)++; \ - (i) |= (uint16_t)((*(p)&0xff) << 8); \ + (i) |= (uint16_t)((*(p) & 0xff) << 8); \ (p)++; \ } while (0) #define INT32DECODE(p, i) \ do { \ - (i) = ((int32_t)(*(p)&0xff)); \ + (i) = ((int32_t)(*(p) & 0xff)); \ (p)++; \ - (i) |= ((int32_t)(*(p)&0xff) << 8); \ + (i) |= ((int32_t)(*(p) & 0xff) << 8); \ (p)++; \ - (i) |= ((int32_t)(*(p)&0xff) << 16); \ + (i) |= ((int32_t)(*(p) & 0xff) << 16); \ (p)++; \ - (i) |= ((int32_t)(((*(p) & (unsigned)0xff) << 24) | ((*(p)&0x80) ? ~0xffffffffULL : 0x0ULL))); \ + (i) |= ((int32_t)(((*(p) & (unsigned)0xff) << 24) | ((*(p) & 0x80) ? ~0xffffffffULL : 0x0ULL))); \ (p)++; \ } while (0) #define UINT32DECODE(p, i) \ do { \ - (i) = (uint32_t)(*(p)&0xff); \ + (i) = (uint32_t)(*(p) & 0xff); \ (p)++; \ - (i) |= ((uint32_t)(*(p)&0xff) << 8); \ + (i) |= ((uint32_t)(*(p) & 0xff) << 8); \ (p)++; \ - (i) |= ((uint32_t)(*(p)&0xff) << 16); \ + (i) |= ((uint32_t)(*(p) & 0xff) << 16); \ (p)++; \ - (i) |= ((uint32_t)(*(p)&0xff) << 24); \ + (i) |= ((uint32_t)(*(p) & 0xff) << 24); \ (p)++; \ } while (0) diff --git a/src/H5private.h b/src/H5private.h index 9950d0ccadc..86799d362b6 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -126,8 +126,8 @@ #define H5_DEFAULT_VFD H5FD_SEC2 #define H5_DEFAULT_VFD_NAME "sec2" -/* Define the default VOL driver */ -#define H5_DEFAULT_VOL H5VL_NATIVE +/* Define the default VOL connector */ +#define H5_DEFAULT_VOL H5VL_NATIVE_conn_g #ifdef H5_HAVE_WIN32_API @@ -857,7 +857,7 @@ H5_DLL H5_ATTR_CONST int Nflock(int fd, int operation); #ifdef H5_HAVE_VASPRINTF #define HDvasprintf(RET, FMT, A) vasprintf(RET, FMT, A) #else -H5_DLL int HDvasprintf(char **bufp, const char *fmt, va_list _ap); +H5_DLL int HDvasprintf(char **bufp, const char *fmt, va_list _ap); #endif #endif diff --git a/src/H5public.h b/src/H5public.h index 9dd18cd5ff7..460aca5f1e1 100644 --- a/src/H5public.h +++ b/src/H5public.h @@ -79,7 +79,7 @@ /** * For minor interface/format changes */ -#define H5_VERS_MINOR 15 +#define H5_VERS_MINOR 17 /** * For tweaks, bug-fixes, or development */ @@ -91,11 +91,11 @@ /** * Short version string */ -#define H5_VERS_STR "1.15.0" +#define H5_VERS_STR "1.17.0" /** * Full version string */ -#define H5_VERS_INFO "HDF5 library version: 1.15.0" +#define H5_VERS_INFO "HDF5 library version: 1.17.0" #define H5check() H5check_version(H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE) diff --git a/src/H5timer.c b/src/H5timer.c index badbc659213..db609ec611e 100644 --- a/src/H5timer.c +++ b/src/H5timer.c @@ -200,7 +200,7 @@ H5_now_usec(void) #else /* H5_HAVE_GETTIMEOFDAY */ /* Cast all values in this expression to uint64_t to ensure that all intermediate calculations * are done in 64 bit, to prevent overflow */ - now = ((uint64_t)time(NULL) * ((uint64_t)1000 * (uint64_t)1000)); + now = ((uint64_t)time(NULL) * ((uint64_t)1000 * (uint64_t)1000)); #endif /* H5_HAVE_GETTIMEOFDAY */ return (now); diff --git a/src/H5trace.c b/src/H5trace.c index d21d82b6664..9b27e072980 100644 --- a/src/H5trace.c +++ b/src/H5trace.c @@ -1453,7 +1453,11 @@ H5_trace_args(H5RS_str_t *rs, const char *type, va_list ap) break; case H5F_LIBVER_V116: - HDcompile_assert(H5F_LIBVER_LATEST == H5F_LIBVER_V116); + H5RS_acat(rs, "H5F_LIBVER_V116"); + break; + + case H5F_LIBVER_V118: + HDcompile_assert(H5F_LIBVER_LATEST == H5F_LIBVER_V118); H5RS_acat(rs, "H5F_LIBVER_LATEST"); break; diff --git a/src/Makefile.am b/src/Makefile.am index 
87b12d08b06..1ae1119c758 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -104,7 +104,8 @@ libhdf5_la_SOURCES= H5.c H5build_settings.c H5checksum.c H5dbg.c H5system.c \ H5VLnative_attr.c H5VLnative_blob.c H5VLnative_dataset.c \ H5VLnative_datatype.c H5VLnative_file.c H5VLnative_group.c \ H5VLnative_link.c H5VLnative_introspect.c H5VLnative_object.c \ - H5VLnative_token.c H5VLpassthru.c H5VLtest.c \ + H5VLnative_token.c H5VLpassthru.c H5VLpassthru_int.c H5VLquery.c \ + H5VLtest.c \ H5VM.c H5WB.c H5Z.c \ H5Zdeflate.c H5Zfletcher32.c H5Znbit.c H5Zshuffle.c H5Zscaleoffset.c \ H5Zszip.c H5Ztrans.c diff --git a/src/uthash.h b/src/uthash.h index b1e5cbb9007..469448d83f5 100644 --- a/src/uthash.h +++ b/src/uthash.h @@ -700,28 +700,38 @@ typedef unsigned char uint8_t; switch (_hj_k) { \ case 11: \ hashv += ((unsigned)_hj_key[10] << 24); \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 10: \ + hashv += ((unsigned)_hj_key[9] << 16); \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 9: \ + hashv += ((unsigned)_hj_key[8] << 8); \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 8: \ + _hj_j += ((unsigned)_hj_key[7] << 24); \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 7: \ + _hj_j += ((unsigned)_hj_key[6] << 16); \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 6: \ + _hj_j += ((unsigned)_hj_key[5] << 8); \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 5: \ + _hj_j += _hj_key[4]; \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 4: \ + _hj_i += ((unsigned)_hj_key[3] << 24); \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 3: \ + _hj_i += ((unsigned)_hj_key[2] << 16); \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 2: \ + _hj_i += ((unsigned)_hj_key[1] << 8); \ + H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ + case 1: \ + _hj_i += _hj_key[0]; \ H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 10 : hashv += ((unsigned)_hj_key[9] << 16); \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 9 : hashv += ((unsigned)_hj_key[8] << 8); \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 8 : _hj_j += ((unsigned)_hj_key[7] << 24); \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 7 : _hj_j += ((unsigned)_hj_key[6] << 16); \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 6 : _hj_j += ((unsigned)_hj_key[5] << 8); \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 5 : _hj_j += _hj_key[4]; \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 4 : _hj_i += ((unsigned)_hj_key[3] << 24); \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 3 : _hj_i += ((unsigned)_hj_key[2] << 16); \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 2 : _hj_i += ((unsigned)_hj_key[1] << 8); \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - case 1 : _hj_i += _hj_key[0]; \ - H5_ATTR_FALLTHROUGH /* FALLTHROUGH */ \ - default:; \ + default :; \ } \ HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ } while (0) diff --git a/test/API/H5_api_async_test.c b/test/API/H5_api_async_test.c index 56fb0cc2e0b..8399074a216 100644 --- a/test/API/H5_api_async_test.c +++ b/test/API/H5_api_async_test.c @@ -1196,9 +1196,9 @@ test_set_extent(void) hid_t file_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; hid_t fspace_id[6] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; + H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; hid_t fspace_out[6] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; + H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; hid_t mspace_id = H5I_INVALID_HID; hid_t dcpl_id = H5I_INVALID_HID; hid_t es_id = 
H5I_INVALID_HID; diff --git a/test/API/H5_api_test.c b/test/API/H5_api_test.c index df489a3c69d..70b68a0bde2 100644 --- a/test/API/H5_api_test.c +++ b/test/API/H5_api_test.c @@ -214,6 +214,8 @@ main(int argc, char **argv) goto done; } else { + int cmp = 0; + /* * If the connector was successfully registered, check that * the connector ID set on the default FAPL matches the ID @@ -231,7 +233,13 @@ main(int argc, char **argv) goto done; } - if (default_con_id != registered_con_id) { + if (H5VLcmp_connector_cls(&cmp, default_con_id, registered_con_id) < 0) { + fprintf(stderr, "Couldn't compare VOL connector classes\n"); + err_occurred = true; + goto done; + } + + if (0 != cmp) { fprintf(stderr, "VOL connector set on default FAPL didn't match specified VOL connector\n"); err_occurred = true; goto done; diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 02a9891d512..8c9b3073b9d 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -355,6 +355,7 @@ set (ttsafe_SOURCES ${HDF5_TEST_SOURCE_DIR}/ttsafe_semaphore.c ${HDF5_TEST_SOURCE_DIR}/ttsafe_thread_id.c ${HDF5_TEST_SOURCE_DIR}/ttsafe_thread_pool.c + ${HDF5_TEST_SOURCE_DIR}/ttsafe_error_stacks.c ) set (H5_EXPRESS_TESTS diff --git a/test/Makefile.am b/test/Makefile.am index 061bfcf0df5..b15e36d3f91 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -156,14 +156,14 @@ endif libh5test_la_SOURCES=h5test.c testframe.c cache_common.c swmr_common.c external_common.c -# Use libhd5test.la to compile all of the tests +# Use libh5test.la to compile all of the tests LDADD=libh5test.la $(LIBHDF5) # List the source files for tests that have more than one ttsafe_SOURCES=ttsafe.c ttsafe_acreate.c ttsafe_atomic.c ttsafe_attr_vlen.c \ ttsafe_cancel.c ttsafe_dcreate.c ttsafe_develop.c ttsafe_error.c \ ttsafe_rwlock.c ttsafe_rec_rwlock.c ttsafe_semaphore.c \ - ttsafe_thread_id.c ttsafe_thread_pool.c + ttsafe_thread_id.c ttsafe_thread_pool.c ttsafe_error_stacks.c cache_image_SOURCES=cache_image.c genall5.c mirror_vfd_SOURCES=mirror_vfd.c genall5.c diff --git a/test/accum.c b/test/accum.c index 62d1c9fd88a..308b64ad32c 100644 --- a/test/accum.c +++ b/test/accum.c @@ -61,7 +61,7 @@ void accum_printf(const H5F_t *f); #define accum_read(a, s, b) H5F_block_read(f, H5FD_MEM_DEFAULT, (haddr_t)(a), (size_t)(s), (b)) #define accum_free(f, a, s) H5F__accum_free(f->shared, H5FD_MEM_DEFAULT, (haddr_t)(a), (hsize_t)(s)) #define accum_flush(f) H5F__accum_flush(f->shared) -#define accum_reset(f) H5F__accum_reset(f->shared, true) +#define accum_reset(f) H5F__accum_reset(f->shared, true, false) /* ================= */ /* Main Test Routine */ diff --git a/test/btree2.c b/test/btree2.c index 0d2ad674806..1fb4d882d5e 100644 --- a/test/btree2.c +++ b/test/btree2.c @@ -9575,7 +9575,7 @@ test_modify(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_t *tpa modify = 4; H5E_BEGIN_TRY { - ret = H5B2_modify(bt2, &record, modify_cb, &modify); + ret = H5B2_modify(bt2, &record, false, modify_cb, &modify); } H5E_END_TRY /* Should fail */ @@ -9600,7 +9600,7 @@ test_modify(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_t *tpa /* Attempt to modify a record in a leaf node */ record = 4330; modify = 4331; - if (H5B2_modify(bt2, &record, modify_cb, &modify) < 0) + if (H5B2_modify(bt2, &record, false, modify_cb, &modify) < 0) FAIL_STACK_ERROR; /* Check status of B-tree */ @@ -9626,7 +9626,7 @@ test_modify(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_t *tpa found = HSIZET_MAX; H5E_BEGIN_TRY { - ret = H5B2_modify(bt2, &record, modify_cb, 
&modify); + ret = H5B2_modify(bt2, &record, false, modify_cb, &modify); } H5E_END_TRY /* Should fail */ @@ -9651,7 +9651,7 @@ test_modify(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_t *tpa /* Attempt to modify a record in an internal node */ record = 5350; modify = 5352; - if (H5B2_modify(bt2, &record, modify_cb, &modify) < 0) + if (H5B2_modify(bt2, &record, false, modify_cb, &modify) < 0) FAIL_STACK_ERROR; /* Check status of B-tree */ @@ -9677,7 +9677,7 @@ test_modify(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_t *tpa found = 5350; H5E_BEGIN_TRY { - ret = H5B2_modify(bt2, &record, modify_cb, &modify); + ret = H5B2_modify(bt2, &record, false, modify_cb, &modify); } H5E_END_TRY /* Should fail */ @@ -9702,7 +9702,7 @@ test_modify(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_t *tpa /* Attempt to modify a record in a root node */ record = 9445; modify = 9448; - if (H5B2_modify(bt2, &record, modify_cb, &modify) < 0) + if (H5B2_modify(bt2, &record, false, modify_cb, &modify) < 0) FAIL_STACK_ERROR; /* Check status of B-tree */ @@ -9728,7 +9728,7 @@ test_modify(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_t *tpa found = 9445; H5E_BEGIN_TRY { - ret = H5B2_modify(bt2, &record, modify_cb, &modify); + ret = H5B2_modify(bt2, &record, false, modify_cb, &modify); } H5E_END_TRY /* Should fail */ @@ -9924,7 +9924,7 @@ main(void) /* Reset library */ h5_test_init(); fapl = h5_fileaccess(); - localTestExpress = TestExpress; + localTestExpress = h5_get_testexpress(); /* For the Direct I/O driver, skip intensive tests due to poor performance */ if (localTestExpress < 2 && !strcmp(driver_name, "direct")) diff --git a/test/cache.c b/test/cache.c index d0cbf3f938f..5be0874df5f 100644 --- a/test/cache.c +++ b/test/cache.c @@ -3148,7 +3148,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ false}, - {/* entry_num = */ 1, + {/* entry_num = */ 1, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 75, /* insert_flag = */ false, @@ -3156,7 +3156,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ true, /* expected_destroyed = */ false}, - {/* entry_num = */ 2, + {/* entry_num = */ 2, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 25, /* insert_flag = */ true, @@ -3164,7 +3164,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ true, /* expected_destroyed = */ false}, - {/* entry_num = */ 3, + {/* entry_num = */ 3, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 50, /* insert_flag = */ true, @@ -3172,7 +3172,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ true, /* expected_destroyed = */ false}, - {/* entry_num = */ 4, + {/* entry_num = */ 4, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 10, /* insert_flag = */ false, @@ -3180,7 +3180,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ false}, - {/* entry_num = */ 5, + {/* entry_num = */ 5, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 20, /* insert_flag = */ false, @@ -3188,7 +3188,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ true, /* expected_destroyed = */ false}, - {/* entry_num = */ 6, + 
{/* entry_num = */ 6, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 30, /* insert_flag = */ true, @@ -3196,7 +3196,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ true, /* expected_destroyed = */ false}, - {/* entry_num = */ 7, + {/* entry_num = */ 7, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 40, /* insert_flag = */ true, @@ -3220,7 +3220,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ true}, - {/* entry_num = */ 1, + {/* entry_num = */ 1, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 75, /* insert_flag = */ false, @@ -3228,7 +3228,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ true, /* expected_destroyed = */ true}, - {/* entry_num = */ 2, + {/* entry_num = */ 2, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 25, /* insert_flag = */ true, @@ -3236,7 +3236,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ true, /* expected_destroyed = */ true}, - {/* entry_num = */ 3, + {/* entry_num = */ 3, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 50, /* insert_flag = */ true, @@ -3244,7 +3244,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ true, /* expected_destroyed = */ true}, - {/* entry_num = */ 4, + {/* entry_num = */ 4, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 10, /* insert_flag = */ false, @@ -3252,7 +3252,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ true}, - {/* entry_num = */ 5, + {/* entry_num = */ 5, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 20, /* insert_flag = */ false, @@ -3260,7 +3260,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ true, /* expected_destroyed = */ true}, - {/* entry_num = */ 6, + {/* entry_num = */ 6, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 30, /* insert_flag = */ true, @@ -3268,7 +3268,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ true, /* expected_destroyed = */ true}, - {/* entry_num = */ 7, + {/* entry_num = */ 7, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 40, /* insert_flag = */ true, @@ -3292,7 +3292,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ false}, - {/* entry_num = */ 1, + {/* entry_num = */ 1, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 75, /* insert_flag = */ false, @@ -3300,7 +3300,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ false}, - {/* entry_num = */ 2, + {/* entry_num = */ 2, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 25, /* insert_flag = */ true, @@ -3308,7 +3308,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ false, /* expected_destroyed = */ false}, - {/* entry_num = */ 3, + {/* entry_num = */ 3, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 50, /* insert_flag = */ true, @@ -3316,7 +3316,7 @@ 
check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ false, /* expected_destroyed = */ false}, - {/* entry_num = */ 4, + {/* entry_num = */ 4, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 10, /* insert_flag = */ false, @@ -3324,7 +3324,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ false}, - {/* entry_num = */ 5, + {/* entry_num = */ 5, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 20, /* insert_flag = */ false, @@ -3332,7 +3332,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ false}, - {/* entry_num = */ 6, + {/* entry_num = */ 6, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 30, /* insert_flag = */ true, @@ -3340,7 +3340,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ false, /* expected_destroyed = */ false}, - {/* entry_num = */ 7, + {/* entry_num = */ 7, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 40, /* insert_flag = */ true, @@ -3364,7 +3364,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ true}, - {/* entry_num = */ 1, + {/* entry_num = */ 1, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 75, /* insert_flag = */ false, @@ -3372,7 +3372,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ true}, - {/* entry_num = */ 2, + {/* entry_num = */ 2, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 25, /* insert_flag = */ true, @@ -3380,7 +3380,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ false, /* expected_destroyed = */ true}, - {/* entry_num = */ 3, + {/* entry_num = */ 3, /* entry_type = */ PICO_ENTRY_TYPE, /* entry_index = */ 50, /* insert_flag = */ true, @@ -3388,7 +3388,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ false, /* expected_destroyed = */ true}, - {/* entry_num = */ 4, + {/* entry_num = */ 4, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 10, /* insert_flag = */ false, @@ -3396,7 +3396,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ true}, - {/* entry_num = */ 5, + {/* entry_num = */ 5, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 20, /* insert_flag = */ false, @@ -3404,7 +3404,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ true, /* expected_serialized = */ false, /* expected_destroyed = */ true}, - {/* entry_num = */ 6, + {/* entry_num = */ 6, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 30, /* insert_flag = */ true, @@ -3412,7 +3412,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* expected_deserialized = */ false, /* expected_serialized = */ false, /* expected_destroyed = */ true}, - {/* entry_num = */ 7, + {/* entry_num = */ 7, /* entry_type = */ MONSTER_ENTRY_TYPE, /* entry_index = */ 40, /* insert_flag = */ true, @@ -3573,7 +3573,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* num_pins = */ 5, /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, 
PICO_ENTRY_TYPE, PICO_ENTRY_TYPE, PICO_ENTRY_TYPE, MONSTER_ENTRY_TYPE, -1, -1, - -1}, + -1}, /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, 10, -1, -1, -1}, /* expected_deserialized = */ true, /* expected_serialized = */ true, @@ -3586,7 +3586,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* num_pins = */ 6, /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, PICO_ENTRY_TYPE, PICO_ENTRY_TYPE, PICO_ENTRY_TYPE, MONSTER_ENTRY_TYPE, - MONSTER_ENTRY_TYPE, -1, -1}, + MONSTER_ENTRY_TYPE, -1, -1}, /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, 10, 20, -1, -1}, /* expected_deserialized = */ false, /* expected_serialized = */ true, @@ -3599,7 +3599,7 @@ check_flush_cache__multi_entry(H5F_t *file_ptr) /* num_pins = */ 7, /* pin_type[MAX_PINS] = */ {PICO_ENTRY_TYPE, PICO_ENTRY_TYPE, PICO_ENTRY_TYPE, PICO_ENTRY_TYPE, MONSTER_ENTRY_TYPE, - MONSTER_ENTRY_TYPE, MONSTER_ENTRY_TYPE, -1}, + MONSTER_ENTRY_TYPE, MONSTER_ENTRY_TYPE, -1}, /* pin_idx[MAX_PINS] = */ {100, 75, 25, 50, 10, 20, 30, -1}, /* expected_deserialized = */ false, /* expected_serialized = */ true, @@ -32368,7 +32368,7 @@ main(void) H5open(); - express_test = GetTestExpress(); + express_test = h5_get_testexpress(); printf("=========================================\n"); printf("Internal cache tests\n"); diff --git a/test/cache_api.c b/test/cache_api.c index 61c8062a0c0..502d954d707 100644 --- a/test/cache_api.c +++ b/test/cache_api.c @@ -2223,7 +2223,7 @@ main(void) H5open(); - express_test = GetTestExpress(); + express_test = h5_get_testexpress(); printf("===================================\n"); printf("Cache API tests\n"); diff --git a/test/cache_image.c b/test/cache_image.c index d2499631878..2cc2cc7f9bb 100644 --- a/test/cache_image.c +++ b/test/cache_image.c @@ -7762,7 +7762,7 @@ main(void) H5open(); - express_test = GetTestExpress(); + express_test = h5_get_testexpress(); printf("=========================================\n"); printf("Cache image tests\n"); diff --git a/test/cache_tagging.c b/test/cache_tagging.c index 9a86b8ab75d..ec4e917b50e 100644 --- a/test/cache_tagging.c +++ b/test/cache_tagging.c @@ -18,7 +18,7 @@ #define H5F_TESTING #include "H5Fpkg.h" -#include "testhdf5.h" +#include "h5test.h" #include "cache_common.h" #include "H5CXprivate.h" /* API Contexts */ diff --git a/test/chunk_info.c b/test/chunk_info.c index 4591b7242b4..991d4640cad 100644 --- a/test/chunk_info.c +++ b/test/chunk_info.c @@ -38,19 +38,15 @@ #define H5D_TESTING /* to use H5D__ functions */ #include "H5Dpkg.h" -#include "testhdf5.h" +#include "h5test.h" #ifdef H5_HAVE_FILTER_DEFLATE #include "zlib.h" #endif /* Test file names, using H5F_libver_t as indices */ -static const char *FILENAME[] = {"tchunk_info_earliest", - "tchunk_info_v18", - "tchunk_info_v110", - "tchunk_info_v112", - "tchunk_info_v114", - "tchunk_info_v116", - NULL}; +static const char *FILENAME[] = { + "tchunk_info_earliest", "tchunk_info_v18", "tchunk_info_v110", "tchunk_info_v112", + "tchunk_info_v114", "tchunk_info_v116", "tchunk_info_v118", NULL}; /* File to be used in test_failed_attempts */ #define FILTERMASK_FILE "tflt_msk" diff --git a/test/dsets.c b/test/dsets.c index ad5dd28f2b1..0df604e7ae6 100644 --- a/test/dsets.c +++ b/test/dsets.c @@ -20,7 +20,7 @@ #define H5Z_FRIEND /*suppress error about including H5Zpkg */ -#include "testhdf5.h" +#include "h5test.h" #include "H5srcdir.h" #include "H5CXprivate.h" /* API Contexts */ @@ -990,14 +990,16 @@ test_compact_io(hid_t fapl) /* Verify the dataset's layout and fill message versions */ if (fp->shared->low_bound == H5F_LIBVER_EARLIEST) { - 
VERIFY(dsetp->shared->layout.version, H5O_LAYOUT_VERSION_DEFAULT, "layout_ver_bounds"); - VERIFY(dsetp->shared->dcpl_cache.fill.version, H5O_FILL_VERSION_2, "fill_ver_bounds"); + if (dsetp->shared->layout.version != H5O_LAYOUT_VERSION_DEFAULT) + TEST_ERROR; + if (dsetp->shared->dcpl_cache.fill.version != H5O_FILL_VERSION_2) + TEST_ERROR; } else { - VERIFY(dsetp->shared->layout.version, H5O_layout_ver_bounds[fp->shared->low_bound], - "layout_ver_bounds"); - VERIFY(dsetp->shared->dcpl_cache.fill.version, H5O_fill_ver_bounds[fp->shared->low_bound], - "fill_ver_bounds"); + if (dsetp->shared->layout.version != H5O_layout_ver_bounds[fp->shared->low_bound]) + TEST_ERROR; + if (dsetp->shared->dcpl_cache.fill.version != H5O_fill_ver_bounds[fp->shared->low_bound]) + TEST_ERROR; } /* Close the dataset and delete from the file */ @@ -3319,7 +3321,7 @@ test_nbit_float(hid_t file) * dataset datatype (no precision loss during datatype conversion) */ float orig_data[2][5] = {{188384.0F, 19.103516F, -1.0831790e9F, -84.242188F, 5.2045898F}, - {-49140.0F, 2350.25F, -3.2110596e-1F, 6.4998865e-5F, -0.0F}}; + {-49140.0F, 2350.25F, -3.2110596e-1F, 6.4998865e-5F, -0.0F}}; float new_data[2][5]; size_t precision, offset; size_t i, j; @@ -3706,7 +3708,7 @@ test_nbit_compound(hid_t file) const hsize_t size[2] = {2, 5}; const hsize_t chunk_size[2] = {2, 5}; const float float_val[2][5] = {{188384.0F, 19.103516F, -1.0831790e9F, -84.242188F, 5.2045898F}, - {-49140.0F, 2350.25F, -3.2110596e-1F, 6.4998865e-5F, -0.0F}}; + {-49140.0F, 2350.25F, -3.2110596e-1F, 6.4998865e-5F, -0.0F}}; atomic orig_data[2][5]; atomic new_data[2][5]; unsigned int i_mask, s_mask, c_mask; @@ -3933,7 +3935,7 @@ test_nbit_compound_2(hid_t file) const hsize_t size[2] = {2, 5}; const hsize_t chunk_size[2] = {2, 5}; const float float_val[2][5] = {{188384.0F, 19.103516F, -1.0831790e9F, -84.242188F, 5.2045898F}, - {-49140.0F, 2350.25F, -3.2110596e-1F, 6.4998865e-5F, -0.0F}}; + {-49140.0F, 2350.25F, -3.2110596e-1F, 6.4998865e-5F, -0.0F}}; complex orig_data[2][5]; complex new_data[2][5]; unsigned int i_mask, s_mask, c_mask, b_mask; @@ -12722,9 +12724,9 @@ test_bt2_hdr_fd(const char *driver_name, hid_t fapl) const hsize_t maxshape[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; const hsize_t chunk[2] = {8, 8}; const int buffer[8][8] = {{0, 1, 2, 3, 4, 5, 6, 7}, {8, 9, 10, 11, 12, 13, 14, 15}, - {16, 17, 18, 19, 20, 21, 22, 23}, {24, 25, 26, 27, 28, 29, 30, 31}, - {32, 33, 34, 35, 36, 37, 38, 39}, {40, 41, 42, 43, 44, 45, 46, 47}, - {48, 49, 50, 51, 52, 53, 54, 55}, {56, 57, 58, 59, 60, 61, 62, 63}}; + {16, 17, 18, 19, 20, 21, 22, 23}, {24, 25, 26, 27, 28, 29, 30, 31}, + {32, 33, 34, 35, 36, 37, 38, 39}, {40, 41, 42, 43, 44, 45, 46, 47}, + {48, 49, 50, 51, 52, 53, 54, 55}, {56, 57, 58, 59, 60, 61, 62, 63}}; H5O_info2_t info; TESTING("Version 2 B-tree chunk index header flush dependencies handled correctly"); @@ -14983,7 +14985,8 @@ test_versionbounds(void) if (vdset > 0) /* dataset created successfully */ { /* Virtual dataset is only available starting in V110 */ - VERIFY(high >= H5F_LIBVER_V110, true, "virtual dataset"); + if (high < H5F_LIBVER_V110) + TEST_ERROR; if (H5Dclose(vdset) < 0) TEST_ERROR; @@ -16165,8 +16168,7 @@ main(void) goto error; printf("All dataset tests passed.\n"); #ifdef H5_HAVE_FILTER_SZIP - if (GetTestCleanup()) - HDremove(NOENCODER_COPY_FILENAME); + HDremove(NOENCODER_COPY_FILENAME); #endif /* H5_HAVE_FILTER_SZIP */ h5_cleanup(FILENAME, fapl); diff --git a/test/dtransform.c b/test/dtransform.c index 6ec48a68e25..53b1f1bc1a8 100644 --- 
a/test/dtransform.c +++ b/test/dtransform.c @@ -121,7 +121,7 @@ const int transformData[ROWS][COLS] = {{36, 31, 25, 19, 13, 7, 1, 5, 11, 16, 22, do { \ struct { \ TYPE arr[ROWS][COLS]; \ - } *array = NULL; \ + } *array = NULL; \ const char *f_to_c = "(5/9.0)*(x-32)"; \ /* utrans is a transform for char types: numbers are restricted from -128 to 127, fits into char */ \ const char *utrans = "(x/4+25)*3"; \ @@ -217,7 +217,7 @@ const int transformData[ROWS][COLS] = {{36, 31, 25, 19, 13, 7, 1, 5, 11, 16, 22, do { \ struct { \ TYPE arr[ROWS][COLS]; \ - } *array = NULL; \ + } *array = NULL; \ const char *f_to_c = "(5/9.0)*(x-32)"; \ /* utrans is a transform for char types: numbers are restricted from -128 to 127, fits into char */ \ const char *utrans = "(x/4+25)*3"; \ diff --git a/test/dtypes.c b/test/dtypes.c index 86cd27e3868..41b3ddf5a49 100644 --- a/test/dtypes.c +++ b/test/dtypes.c @@ -14,7 +14,7 @@ * Purpose: Tests the datatype interface (H5T) */ -#include "testhdf5.h" +#include "h5test.h" #include "H5srcdir.h" #include "H5Iprivate.h" /* For checking that datatype id's don't leak */ @@ -774,10 +774,10 @@ test_compound_2(void) { struct st { int a, b, c[4], d, e; - } * s_ptr; + } *s_ptr; struct dt { int e, d, c[4], b, a; - } * d_ptr; + } *d_ptr; const size_t nelmts = NTESTELEM; const hsize_t four = 4; @@ -899,10 +899,10 @@ test_compound_3(void) { struct st { int a, b, c[4], d, e; - } * s_ptr; + } *s_ptr; struct dt { int a, c[4], e; - } * d_ptr; + } *d_ptr; const size_t nelmts = NTESTELEM; const hsize_t four = 4; @@ -1021,13 +1021,13 @@ test_compound_4(void) struct st { int a, b, c[4], d, e; - } * s_ptr; + } *s_ptr; struct dt { short b; int a, c[4]; short d; int e; - } * d_ptr; + } *d_ptr; const size_t nelmts = NTESTELEM; const hsize_t four = 4; @@ -1249,11 +1249,11 @@ test_compound_6(void) struct st { short b; short d; - } * s_ptr; + } *s_ptr; struct dt { long b; long d; - } * d_ptr; + } *d_ptr; const size_t nelmts = NTESTELEM; unsigned char *buf = NULL, *orig = NULL, *bkg = NULL; @@ -7693,7 +7693,7 @@ test_int_float_except(void) { #if H5_SIZEOF_INT == 4 && H5_SIZEOF_FLOAT == 4 float buf[CONVERT_SIZE] = {(float)INT_MIN - 172.0F, (float)INT_MAX - 32.0F, (float)INT_MAX - 68.0F, - (float)4.5F}; + (float)4.5F}; int buf_int[CONVERT_SIZE] = {INT_MIN, INT_MAX, INT_MAX - 127, 4}; float buf_float[CONVERT_SIZE] = {(float)INT_MIN, (float)INT_MAX + 1.0F, (float)INT_MAX - 127.0F, 4}; int *intp; /* Pointer to buffer, as integers */ @@ -9528,8 +9528,8 @@ test_deprec(hid_t fapl) dim_mismatch = false; for (u = 0; u < rank; u++) if (rdims[u] != dims[u]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%u]=%d, tdims1[%u]=%d\n", u, - (int)rdims[u], u, (int)dims[u]); + fprintf(stderr, "Array dimension information doesn't match!, rdims1[%u]=%d, tdims1[%u]=%d\n", u, + (int)rdims[u], u, (int)dims[u]); dim_mismatch = true; } /* end if */ if (dim_mismatch) @@ -9539,9 +9539,9 @@ test_deprec(hid_t fapl) dim_mismatch = false; for (u = 0; u < rank; u++) if (rperm[u] != -2) { - TestErrPrintf( - "Array dimension permutation information was modified!, rdims1[%u]=%d, tdims1[%u]=%d\n", u, - rperm[u], u, perm[u]); + fprintf(stderr, + "Array dimension permutation information was modified!, rdims1[%u]=%d, tdims1[%u]=%d\n", + u, rperm[u], u, perm[u]); dim_mismatch = true; } /* end if */ if (dim_mismatch) @@ -9946,14 +9946,19 @@ verify_version(hid_t dtype, H5F_libver_t low, unsigned *highest_version) case H5T_ARRAY: { H5T_t *base_dtypep = NULL; /* Internal structure of a datatype */ - if (low == 
H5F_LIBVER_EARLIEST) - VERIFY(dtypep->shared->version, H5O_DTYPE_VERSION_2, "H5O_dtype_ver_bounds"); - else - VERIFY(dtypep->shared->version, H5O_dtype_ver_bounds[low], "H5O_dtype_ver_bounds"); + if (low == H5F_LIBVER_EARLIEST) { + if (dtypep->shared->version != H5O_DTYPE_VERSION_2) + TEST_ERROR; + } + else { + if (dtypep->shared->version != H5O_dtype_ver_bounds[low]) + TEST_ERROR; + } /* Get the base datatype of this array type */ base_dtype = H5Tget_super(dtype); - CHECK(base_dtype, FAIL, "H5Tget_super"); + if (base_dtype == H5I_INVALID_HID) + TEST_ERROR; /* Get the base type's internal structure for version */ base_dtypep = (H5T_t *)H5I_object(base_dtype); @@ -10014,19 +10019,25 @@ verify_version(hid_t dtype, H5F_libver_t low, unsigned *highest_version) } /* If this compound datatype contains a datatype of higher version, it will be promoted to that version, thus, verify with highest version */ - if (*highest_version > H5O_dtype_ver_bounds[low]) - VERIFY(dtypep->shared->version, *highest_version, "verify_version"); - else - VERIFY(dtypep->shared->version, H5O_dtype_ver_bounds[low], "verify_version"); + if (*highest_version > H5O_dtype_ver_bounds[low]) { + if (dtypep->shared->version != *highest_version) + TEST_ERROR; + } + else { + if (dtypep->shared->version != H5O_dtype_ver_bounds[low]) + TEST_ERROR; + } break; } case H5T_ENUM: - VERIFY(dtypep->shared->version, H5O_dtype_ver_bounds[low], "verify_version"); + if (dtypep->shared->version != H5O_dtype_ver_bounds[low]) + TEST_ERROR; break; case H5T_VLEN: case H5T_FLOAT: case H5T_INTEGER: - VERIFY(dtypep->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST], "verify_version"); + if (dtypep->shared->version != H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST]) + TEST_ERROR; break; case H5T_NCLASSES: case H5T_NO_CLASS: @@ -10115,7 +10126,7 @@ test_versionbounds(void) hsize_t arr_dim[] = {ARRAY_LEN}; /* Length of the array */ int low, high; /* Indices for iterating over versions */ H5F_libver_t versions[] = {H5F_LIBVER_EARLIEST, H5F_LIBVER_V18, H5F_LIBVER_V110, - H5F_LIBVER_V112, H5F_LIBVER_V114, H5F_LIBVER_V116}; + H5F_LIBVER_V112, H5F_LIBVER_V114, H5F_LIBVER_V116}; int versions_count = 6; /* Number of version bounds in the array */ unsigned highest_version; /* Highest version in nested datatypes */ color_t enum_val; /* Enum type index */ diff --git a/test/earray.c b/test/earray.c index aefb9cfc465..edd054994c7 100644 --- a/test/earray.c +++ b/test/earray.c @@ -2302,8 +2302,6 @@ main(void) /* Reset library */ h5_test_init(); fapl = h5_fileaccess(); - if (TestExpress > 0) - printf("***Express test mode %d. 
Some tests may be skipped\n", TestExpress); /* Set the filename to use for this test (dependent on fapl) */ h5_fixname(FILENAME[0], fapl, filename_g, sizeof(filename_g)); diff --git a/test/enc_dec_plist.c b/test/enc_dec_plist.c index 878fe86449b..7a289618eaa 100644 --- a/test/enc_dec_plist.c +++ b/test/enc_dec_plist.c @@ -14,7 +14,7 @@ * Serial tests for encoding/decoding plists */ -#include "testhdf5.h" +#include "h5test.h" #include "H5ACprivate.h" #include "H5Pprivate.h" @@ -191,9 +191,6 @@ main(void) "Testing ENCODE/DECODE with file version bounds: (%s, %s):", low_string, high_string); puts(msg); - if (VERBOSE_MED) - printf("Encode/Decode DCPLs\n"); - /******* ENCODE/DECODE DCPLS *****/ TESTING("Default DCPL Encoding/Decoding"); if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) diff --git a/test/enc_dec_plist_cross_platform.c b/test/enc_dec_plist_cross_platform.c index d2d68105986..e7403576a8e 100644 --- a/test/enc_dec_plist_cross_platform.c +++ b/test/enc_dec_plist_cross_platform.c @@ -26,9 +26,6 @@ static int test_plists(const char *filename_prefix); int main(void) { - if (VERBOSE_MED) - printf("Encode/Decode property list endianness\n"); - /******* ENCODE/DECODE DCPLS *****/ TESTING("Default DCPL Encoding/Decoding"); if (test_plists("plist_files/def_dcpl_") < 0) diff --git a/test/enum.c b/test/enum.c index 2c8e8cd28d7..ac348fcb437 100644 --- a/test/enum.c +++ b/test/enum.c @@ -139,8 +139,8 @@ test_conv(hid_t file) c_e1 val; /* Some values are out of range for testing. The library should accept them */ c_e1 data1[] = {E1_RED, E1_GREEN, E1_BLUE, E1_GREEN, E1_WHITE, E1_WHITE, E1_BLACK, - E1_GREEN, E1_BLUE, E1_RED, E1_RED, E1_BLUE, E1_GREEN, E1_BLACK, - E1_WHITE, E1_RED, E1_WHITE, (c_e1)0, (c_e1)-1, (c_e1)-2}; + E1_GREEN, E1_BLUE, E1_RED, E1_RED, E1_BLUE, E1_GREEN, E1_BLACK, + E1_WHITE, E1_RED, E1_WHITE, (c_e1)0, (c_e1)-1, (c_e1)-2}; c_e1 data2[NELMTS(data1)]; short data_short[NELMTS(data1)]; int data_int[NELMTS(data1)]; @@ -314,7 +314,7 @@ test_tr1(hid_t file) c_e1 eval; int ival; c_e1 data1[10] = {E1_RED, E1_GREEN, E1_BLUE, E1_GREEN, E1_WHITE, - E1_WHITE, E1_BLACK, E1_GREEN, E1_BLUE, E1_RED}; + E1_WHITE, E1_BLACK, E1_GREEN, E1_BLUE, E1_RED}; c_e1 data2[10]; TESTING("O(1) conversions"); @@ -414,7 +414,7 @@ test_tr2(hid_t file) c_e1 val1; int val2; c_e1 data1[10] = {E1_RED, E1_GREEN, E1_BLUE, E1_GREEN, E1_WHITE, - E1_WHITE, E1_BLACK, E1_GREEN, E1_BLUE, E1_RED}; + E1_WHITE, E1_BLACK, E1_GREEN, E1_BLUE, E1_RED}; c_e1 data2[10]; TESTING("O(log N) conversions"); diff --git a/test/event_set.c b/test/event_set.c index 0880157f430..65ecc12887a 100644 --- a/test/event_set.c +++ b/test/event_set.c @@ -316,6 +316,7 @@ test_es_get_requests(void) void *requests[2]; /* Requests */ int req_targets[2]; /* Dummy targets for void * requests */ size_t count; /* # of events in set */ + int cmp_value; /* Comparison value */ bool op_failed; /* Whether an operation failed (unused) */ TESTING("event set get requests"); @@ -395,7 +396,12 @@ test_es_get_requests(void) TEST_ERROR; if (count != 1) TEST_ERROR; - if (connector_ids[0] != connector_ids_g[0]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], connector_ids_g[0]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) TEST_ERROR; if (connector_ids[1] != H5I_INVALID_HID) TEST_ERROR; @@ -423,7 +429,12 @@ test_es_get_requests(void) TEST_ERROR; if (count != 1) TEST_ERROR; - if (connector_ids[0] != connector_ids_g[0]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], 
connector_ids_g[0]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) TEST_ERROR; if (connector_ids[1] != H5I_INVALID_HID) TEST_ERROR; @@ -451,9 +462,19 @@ test_es_get_requests(void) TEST_ERROR; if (count != 2) TEST_ERROR; - if (connector_ids[0] != connector_ids_g[0]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], connector_ids_g[0]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) + TEST_ERROR; + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[1], connector_ids_g[1]) < 0) TEST_ERROR; - if (connector_ids[1] != connector_ids_g[1]) + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[1]) < 0) TEST_ERROR; /* Try with H5_ITER_DEC */ @@ -464,9 +485,19 @@ test_es_get_requests(void) TEST_ERROR; if (count != 2) TEST_ERROR; - if (connector_ids[0] != connector_ids_g[1]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], connector_ids_g[1]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) + TEST_ERROR; + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[1], connector_ids_g[0]) < 0) TEST_ERROR; - if (connector_ids[1] != connector_ids_g[0]) + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[1]) < 0) TEST_ERROR; /* Get only requests */ @@ -505,9 +536,19 @@ test_es_get_requests(void) TEST_ERROR; if (count != 2) TEST_ERROR; - if (connector_ids[0] != connector_ids_g[0]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], connector_ids_g[0]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) TEST_ERROR; - if (connector_ids[1] != connector_ids_g[1]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[1], connector_ids_g[1]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[1]) < 0) TEST_ERROR; if (requests[0] != &req_targets[0]) TEST_ERROR; @@ -524,9 +565,19 @@ test_es_get_requests(void) TEST_ERROR; if (count != 2) TEST_ERROR; - if (connector_ids[0] != connector_ids_g[1]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], connector_ids_g[1]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) TEST_ERROR; - if (connector_ids[1] != connector_ids_g[0]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[1], connector_ids_g[0]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[1]) < 0) TEST_ERROR; if (requests[0] != &req_targets[1]) TEST_ERROR; @@ -541,7 +592,12 @@ test_es_get_requests(void) TEST_ERROR; if (count != 2) TEST_ERROR; - if (connector_ids[0] != connector_ids_g[0]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], connector_ids_g[0]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) TEST_ERROR; if (connector_ids[1] != H5I_INVALID_HID) TEST_ERROR; @@ -554,7 +610,12 @@ test_es_get_requests(void) TEST_ERROR; if (count != 2) TEST_ERROR; - if (connector_ids[0] != connector_ids_g[1]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], connector_ids_g[1]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) TEST_ERROR; if (connector_ids[1] != H5I_INVALID_HID) TEST_ERROR; @@ -593,7 +654,12 @@ test_es_get_requests(void) requests[1] = NULL; if (H5ESget_requests(es_id, H5_ITER_INC, connector_ids, requests, 1, &count) < 0) TEST_ERROR; - if 
(connector_ids[0] != connector_ids_g[0]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], connector_ids_g[0]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) TEST_ERROR; if (connector_ids[1] != H5I_INVALID_HID) TEST_ERROR; @@ -610,7 +676,12 @@ test_es_get_requests(void) requests[1] = NULL; if (H5ESget_requests(es_id, H5_ITER_DEC, connector_ids, requests, 1, &count) < 0) TEST_ERROR; - if (connector_ids[0] != connector_ids_g[1]) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, connector_ids[0], connector_ids_g[1]) < 0) + TEST_ERROR; + if (cmp_value) + TEST_ERROR; + if (H5Idec_ref(connector_ids[0]) < 0) TEST_ERROR; if (connector_ids[1] != H5I_INVALID_HID) TEST_ERROR; diff --git a/test/farray.c b/test/farray.c index 69162aec1ad..b4cd1fef3c8 100644 --- a/test/farray.c +++ b/test/farray.c @@ -1633,8 +1633,6 @@ main(void) /* Reset library */ h5_test_init(); fapl = h5_fileaccess(); - if (TestExpress > 0) - printf("***Express test mode %d. Some tests may be skipped\n", TestExpress); /* Set the filename to use for this test (dependent on fapl) */ h5_fixname(FILENAME[0], fapl, filename_g, sizeof(filename_g)); diff --git a/test/fheap.c b/test/fheap.c index 4aed1666bf2..7c62a5dc083 100644 --- a/test/fheap.c +++ b/test/fheap.c @@ -15960,6 +15960,7 @@ main(void) const char *driver_name; /* Environment variable */ bool contig_addr_vfd; /* Whether VFD used has a contiguous address space */ bool api_ctx_pushed = false; /* Whether API context pushed */ + int test_express; /* Don't run this test using certain file drivers */ driver_name = h5_get_test_driver_name(); @@ -15982,9 +15983,10 @@ main(void) * Activate full testing when this feature is re-enabled * in the future for parallel build. */ - if (TestExpress > 0) - printf("***Express test mode %d. Some tests may be skipped\n", TestExpress); - else if (TestExpress == 0) { + test_express = h5_get_testexpress(); + if (test_express > 0) + printf("***Express test mode %d. Some tests may be skipped\n", test_express); + else if (test_express == 0) { #ifdef H5_HAVE_PARALLEL num_pb_fs = NUM_PB_FS - 2; #else @@ -16200,7 +16202,7 @@ main(void) /* If this test fails, uncomment the tests above, which build up to this * level of complexity gradually. -QAK */ - if (TestExpress > 1) + if (test_express > 1) printf( "***Express test mode on. test_man_start_5th_recursive_indirect is skipped\n"); else @@ -16248,7 +16250,7 @@ main(void) nerrors += test_man_remove_first_row(fapl, &small_cparam, &tparam); nerrors += test_man_remove_first_two_rows(fapl, &small_cparam, &tparam); nerrors += test_man_remove_first_four_rows(fapl, &small_cparam, &tparam); - if (TestExpress > 1) + if (test_express > 1) printf("***Express test mode on. Some tests skipped\n"); else { nerrors += test_man_remove_all_root_direct(fapl, &small_cparam, &tparam); @@ -16298,7 +16300,7 @@ main(void) nerrors += test_man_fill_1st_row_3rd_direct_fill_2nd_direct_less_one_wrap_start_block_add_skipped( fapl, &small_cparam, &tparam); - if (TestExpress > 1) + if (test_express > 1) printf("***Express test mode on. Some tests skipped\n"); else { nerrors += @@ -16428,7 +16430,7 @@ main(void) } /* end block */ /* Random object insertion & deletion */ - if (TestExpress > 1) + if (test_express > 1) printf("***Express test mode on. 
Some tests skipped\n"); else { /* Random tests using "small" heap creation parameters */ diff --git a/test/flushrefresh.c b/test/flushrefresh.c index bab334f1c68..0e662209212 100644 --- a/test/flushrefresh.c +++ b/test/flushrefresh.c @@ -26,7 +26,7 @@ /* Includes */ /* ======== */ -#include "testhdf5.h" +#include "h5test.h" #include "H5FDpkg.h" /* File Drivers */ /* ======= */ diff --git a/test/gen_filespace.c b/test/gen_filespace.c index 27f3d2628cb..d2572887674 100644 --- a/test/gen_filespace.c +++ b/test/gen_filespace.c @@ -26,8 +26,8 @@ const char *FILENAMES[] = { #define DATASET "dset" #define NUM_ELMTS 100 -#define false 0 -#define true 1 +#define false 0 +#define true 1 /* * Compile and run this program in the trunk to generate diff --git a/test/gen_plist.c b/test/gen_plist.c index 1400b4095a6..5f5f95c80e1 100644 --- a/test/gen_plist.c +++ b/test/gen_plist.c @@ -48,35 +48,35 @@ main(void) int little_endian; int word_length; H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION, - 1 /*true*/, - 0 /*false*/, - 0 /*false*/, - "temp", - 1 /*true*/, - 0 /*false*/, - (2 * 2048 * 1024), - 0.3, - (64 * 1024 * 1024), - (4 * 1024 * 1024), - 60000, - H5C_incr__threshold, - 0.8, - 3.0, - 1 /*true*/, - (8 * 1024 * 1024), - H5C_flash_incr__add_space, - 2.0, - 0.25, - H5C_decr__age_out_with_threshold, - 0.997, - 0.8, - 1 /*true*/, - (3 * 1024 * 1024), - 3, - 0 /*false*/, - 0.2, - (256 * 2048), - H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY}; + 1 /*true*/, + 0 /*false*/, + 0 /*false*/, + "temp", + 1 /*true*/, + 0 /*false*/, + (2 * 2048 * 1024), + 0.3, + (64 * 1024 * 1024), + (4 * 1024 * 1024), + 60000, + H5C_incr__threshold, + 0.8, + 3.0, + 1 /*true*/, + (8 * 1024 * 1024), + H5C_flash_incr__add_space, + 2.0, + 0.25, + H5C_decr__age_out_with_threshold, + 0.997, + 0.8, + 1 /*true*/, + (3 * 1024 * 1024), + 3, + 0 /*false*/, + 0.2, + (256 * 2048), + H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY}; H5AC_cache_image_config_t my_cache_image_config = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, true, false, -1}; diff --git a/test/h5test.c b/test/h5test.c index 61c01da529c..828d83f40f4 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -107,6 +107,7 @@ const char *LIBVER_NAMES[] = {"earliest", /* H5F_LIBVER_EARLIEST = 0 */ "v112", /* H5F_LIBVER_V112 = 3 */ "v114", /* H5F_LIBVER_V114 = 4 */ "v116", /* H5F_LIBVER_V116 = 5 */ + "v118", /* H5F_LIBVER_V118 = 6 */ "latest", /* H5F_LIBVER_LATEST */ NULL}; @@ -114,11 +115,15 @@ const char *LIBVER_NAMES[] = {"earliest", /* H5F_LIBVER_EARLIEST = 0 */ static H5E_auto2_t err_func = NULL; /* Global variables for testing */ -size_t n_tests_run_g = 0; -size_t n_tests_passed_g = 0; -size_t n_tests_failed_g = 0; -size_t n_tests_skipped_g = 0; -uint64_t vol_cap_flags_g = H5VL_CAP_FLAG_NONE; +static int TestExpress_g = -1; /* Whether to expedite testing. -1 means not set yet. 
*/ +size_t n_tests_run_g = 0; +size_t n_tests_passed_g = 0; +size_t n_tests_failed_g = 0; +size_t n_tests_skipped_g = 0; +uint64_t vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + +/* Whether h5_cleanup should clean up temporary testing files */ +static bool do_test_file_cleanup_g = true; static herr_t h5_errors(hid_t estack, void *client_data); static char *h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fullname, @@ -215,7 +220,7 @@ h5_cleanup(const char *base_name[], hid_t fapl) { int retval = 0; - if (GetTestCleanup()) { + if (do_test_file_cleanup_g) { /* Clean up files in base_name, and the FAPL */ h5_delete_all_test_files(base_name, fapl); H5Pclose(fapl); @@ -274,7 +279,7 @@ h5_test_init(void) H5Eset_auto2(H5E_DEFAULT, h5_errors, NULL); /* Retrieve the TestExpress mode */ - GetTestExpress(); + TestExpress_g = h5_get_testexpress(); } /* end h5_test_init() */ /*------------------------------------------------------------------------- @@ -460,12 +465,12 @@ h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fu if (isppdriver) { #ifdef H5_HAVE_PARALLEL if (getenv_all(MPI_COMM_WORLD, 0, HDF5_NOCLEANUP)) - SetTestNoCleanup(); + do_test_file_cleanup_g = false; #endif /* H5_HAVE_PARALLEL */ } else { if (getenv(HDF5_NOCLEANUP)) - SetTestNoCleanup(); + do_test_file_cleanup_g = false; } /* Check what prefix to use for test files. Process HDF5_PARAPREFIX and @@ -1008,6 +1013,81 @@ h5_get_libver_fapl(hid_t fapl) return -1; } /* end h5_get_libver_fapl() */ +/* + * Returns the current TestExpress functionality setting. + * Valid values returned are as follows: + * + * 0: Exhaustive run + * Tests should take as long as necessary + * 1: Full run. Default value if H5_TEST_EXPRESS_LEVEL_DEFAULT + * and the HDF5TestExpress environment variable are not defined + * Tests should take no more than 30 minutes + * 2: Quick run + * Tests should take no more than 10 minutes + * 3: Smoke test. + * Default if the HDF5TestExpress environment variable is set to + * a value other than 0-3 + * Tests should take less than 1 minute + * + * If the value returned is > 1, then test programs should + * skip some tests so that they complete sooner. + */ +int +h5_get_testexpress(void) +{ + char *env_val; + int express_val = TestExpress_g; + + /* TestExpress_g is uninitialized if it has a negative value */ + if (express_val < 0) { + /* Default to level 1 if not overridden */ + express_val = 1; + + /* Check if a default test express level is defined (e.g., by build system) */ +#ifdef H5_TEST_EXPRESS_LEVEL_DEFAULT + express_val = H5_TEST_EXPRESS_LEVEL_DEFAULT; + if (express_val < 0) + express_val = 1; /* Reset to default */ + else if (express_val > 3) + express_val = 3; +#endif + } + + /* Check if the HDF5TestExpress environment variable is set to + * override the default level + */ + env_val = getenv("HDF5TestExpress"); + if (env_val) { + if (strcmp(env_val, "0") == 0) + express_val = 0; + else if (strcmp(env_val, "1") == 0) + express_val = 1; + else if (strcmp(env_val, "2") == 0) + express_val = 2; + else + express_val = 3; + } + + return express_val; +} + +/* + * Sets the level of express testing to the given value. Negative + * values are set to the default TestExpress setting (1). Values + * larger than the highest TestExpress setting (3) are set to the + * highest TestExpress setting. 
+ */ +void +h5_set_testexpress(int new_val) +{ + if (new_val < 0) + new_val = 1; /* Reset to default */ + else if (new_val > 3) + new_val = 3; + + TestExpress_g = new_val; +} + /*------------------------------------------------------------------------- * Function: h5_no_hwconv * @@ -1403,24 +1483,6 @@ h5_get_file_size(const char *filename, hid_t fapl) } /* end get_file_size() */ H5_GCC_CLANG_DIAG_ON("format-nonliteral") -/* - * This routine is designed to provide equivalent functionality to 'printf' - * and allow easy replacement for environments which don't have stdin/stdout - * available. (i.e. Windows & the Mac) - */ -H5_ATTR_FORMAT(printf, 1, 2) -int -print_func(const char *format, ...) -{ - va_list arglist; - int ret_value; - - va_start(arglist, format); - ret_value = vprintf(format, arglist); - va_end(arglist); - return ret_value; -} - #ifdef H5_HAVE_FILTER_SZIP /*------------------------------------------------------------------------- diff --git a/test/h5test.h b/test/h5test.h index 1ec537c62e3..1b33664fbf0 100644 --- a/test/h5test.h +++ b/test/h5test.h @@ -24,77 +24,6 @@ #include "H5private.h" #include "H5Eprivate.h" -/* - * Predefined test verbosity levels. - * - * Convention: - * - * The higher the verbosity value, the more information printed. - * So, output for higher verbosity also include output of all lower - * verbosity. - * - * Value Description - * 0 None: No informational message. - * 1 "All tests passed" - * 2 Header of overall test - * 3 Default: header and results of individual test - * 4 - * 5 Low: Major category of tests. - * 6 - * 7 Medium: Minor category of tests such as functions called. - * 8 - * 9 High: Highest level. All information. - */ -#define VERBO_NONE 0 /* None */ -#define VERBO_DEF 3 /* Default */ -#define VERBO_LO 5 /* Low */ -#define VERBO_MED 7 /* Medium */ -#define VERBO_HI 9 /* High */ - -/* - * Verbose queries - * Only None needs an exact match. The rest are at least as much. - */ - -/* A macro version of HDGetTestVerbosity(). */ -/* Should be used internally by the libtest.a only. */ -#define HDGetTestVerbosity() (TestVerbosity) - -#define VERBOSE_NONE (HDGetTestVerbosity() == VERBO_NONE) -#define VERBOSE_DEF (HDGetTestVerbosity() >= VERBO_DEF) -#define VERBOSE_LO (HDGetTestVerbosity() >= VERBO_LO) -#define VERBOSE_MED (HDGetTestVerbosity() >= VERBO_MED) -#define VERBOSE_HI (HDGetTestVerbosity() >= VERBO_HI) - -/* - * The TestExpress mode for the testing framework - * - Values: - 0: Exhaustive run - Tests should take as long as necessary - 1: Full run. Default if H5_TEST_EXPRESS_LEVEL_DEFAULT - and HDF5TestExpress are not defined - Tests should take no more than 30 minutes - 2: Quick run - Tests should take no more than 10 minutes - 3: Smoke test. - Default if HDF5TestExpress is set to a value other than 0-3 - Tests should take less than 1 minute - - Design: - If the environment variable $HDF5TestExpress is defined, - or if a default testing level > 1 has been set via - H5_TEST_EXPRESS_LEVEL_DEFAULT, then test programs should - skip some tests so that they complete sooner. -*/ - -/* - * Test controls definitions. - */ -#define SKIPTEST 1 /* Skip this test */ -#define ONLYTEST 2 /* Do only this test */ -#define BEGINTEST 3 /* Skip all tests before this test */ - /* * This contains the filename prefix specified as command line option for * the parallel test files. 
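A minimal usage sketch for the new TestExpress helpers added in h5test.c above (and declared in h5test.h below), mirroring the pattern this patch applies in fheap.c; test_intensive_case() here is a hypothetical stand-in for a real long-running test routine, not part of the patch:

#include <stdio.h>
#include "h5test.h"

/* Hypothetical long-running check; stands in for a real test routine */
static int
test_intensive_case(void)
{
    return 0; /* 0 = success */
}

int
main(void)
{
    int nerrors = 0;
    int express;

    h5_test_init();

    /* 0 = exhaustive, 1 = full (default), 2 = quick, 3 = smoke test */
    express = h5_get_testexpress();
    if (express > 1)
        printf("***Express test mode %d. Some tests may be skipped\n", express);
    else
        nerrors += test_intensive_case();

    /* Requests outside 0-3 are clamped; this one is stored as 3 (smoke test) */
    h5_set_testexpress(5);

    return nerrors ? 1 : 0;
}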
@@ -248,9 +177,6 @@ H5TEST_DLLVAR MPI_Info h5_io_info_g; /* MPI INFO object for IO */ goto part_##part_name##_end; \ } while (0) -/* Number of seconds to wait before killing a test (requires alarm(2)) */ -#define H5_ALARM_SEC 1200 /* default is 20 minutes */ - /* Flags for h5_fileaccess_flags() */ #define H5_FILEACCESS_VFD 0x01 #define H5_FILEACCESS_LIBVER 0x02 @@ -302,7 +228,6 @@ H5TEST_DLL const char *h5_rmprefix(const char *filename); H5TEST_DLL void h5_restore_err(void); H5TEST_DLL void h5_show_hostname(void); H5TEST_DLL h5_stat_size_t h5_get_file_size(const char *filename, hid_t fapl); -H5TEST_DLL int print_func(const char *format, ...) H5_ATTR_FORMAT(printf, 1, 2); H5TEST_DLL int h5_make_local_copy(const char *origfilename, const char *local_copy_name); H5TEST_DLL herr_t h5_verify_cached_stabs(const char *base_name[], hid_t fapl); H5TEST_DLL H5FD_class_t *h5_get_dummy_vfd_class(void); @@ -339,33 +264,9 @@ H5TEST_DLL void h5_delete_all_test_files(const char *base_name[], hid_t fapl); * including resetting the library by closing it */ H5TEST_DLL void h5_test_init(void); -/* Routines for operating on the list of tests (for the "all in one" tests) */ -H5TEST_DLL void TestUsage(void); -H5TEST_DLL void AddTest(const char *TheName, void (*TheCall)(void), void (*Cleanup)(void), - const char *TheDescr, const void *Parameters); -H5TEST_DLL void TestInfo(const char *ProgName); -H5TEST_DLL void TestParseCmdLine(int argc, char *argv[]); -H5TEST_DLL void PerformTests(void); -H5TEST_DLL void TestSummary(void); -H5TEST_DLL void TestCleanup(void); -H5TEST_DLL void TestShutdown(void); -H5TEST_DLL void TestInit(const char *ProgName, void (*private_usage)(void), - int (*private_parser)(int ac, char *av[])); -H5TEST_DLL int GetTestVerbosity(void); -H5TEST_DLL int SetTestVerbosity(int newval); -H5TEST_DLL int GetTestSummary(void); -H5TEST_DLL int GetTestCleanup(void); -H5TEST_DLL int SetTestNoCleanup(void); -H5TEST_DLL int GetTestExpress(void); -H5TEST_DLL int SetTestExpress(int newval); -H5TEST_DLL void ParseTestVerbosity(char *argv); -H5TEST_DLL int GetTestNumErrs(void); -H5TEST_DLL void IncTestNumErrs(void); -H5TEST_DLL const void *GetTestParameters(void); -H5TEST_DLL int TestErrPrintf(const char *format, ...) 
H5_ATTR_FORMAT(printf, 1, 2); -H5TEST_DLL void SetTest(const char *testname, int action); -H5TEST_DLL void TestAlarmOn(void); -H5TEST_DLL void TestAlarmOff(void); +/* Functions that deal with expediting testing */ +H5TEST_DLL int h5_get_testexpress(void); +H5TEST_DLL void h5_set_testexpress(int new_val); #ifdef H5_HAVE_FILTER_SZIP H5TEST_DLL int h5_szip_can_encode(void); @@ -378,14 +279,11 @@ H5TEST_DLL char *getenv_all(MPI_Comm comm, int root, const char *name); #endif /* Extern global variables */ -H5TEST_DLLVAR int TestExpress; -H5TEST_DLLVAR int TestVerbosity; H5TEST_DLLVAR size_t n_tests_run_g; H5TEST_DLLVAR size_t n_tests_passed_g; H5TEST_DLLVAR size_t n_tests_failed_g; H5TEST_DLLVAR size_t n_tests_skipped_g; H5TEST_DLLVAR uint64_t vol_cap_flags_g; -H5TEST_DLLVAR int mpi_rank_framework_g; H5TEST_DLL void h5_send_message(const char *file, const char *arg1, const char *arg2); H5TEST_DLL herr_t h5_wait_message(const char *file); diff --git a/test/hdfs.c b/test/hdfs.c index 956621f384c..bf67e045abc 100644 --- a/test/hdfs.c +++ b/test/hdfs.c @@ -588,10 +588,10 @@ test_hdfs_fapl(void) * test-local variables * ************************/ - hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */ - hid_t driver_id = H5I_INVALID_HID; /* ID for this VFD */ - unsigned long driver_flags = 0; /* VFD feature flags */ - H5FD_hdfs_fapl_t hdfs_fa_0 = { + hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */ + hid_t driver_id = H5I_INVALID_HID; /* ID for this VFD */ + unsigned long driver_flags = 0; /* VFD feature flags */ + H5FD_hdfs_fapl_t hdfs_fa_0 = { 1, /* version*/ "", /* node name */ 9000, /* node port */ @@ -672,7 +672,7 @@ test_vfd_open(void) * test-local macros * *********************/ -/* selectors for which fapl to use in testcase */ + /* selectors for which fapl to use in testcase */ #define FAPL_H5P_DEFAULT -2 #define FAPL_UNCONFIGURED -3 /* H5P_FILE_ACCESS */ #define FAPL_HDFS -4 @@ -684,10 +684,10 @@ test_vfd_open(void) struct test_condition { const char *message; const char *url; - unsigned flags; - int which_fapl; - haddr_t maxaddr; - bool might_use_other_driver; + unsigned flags; + int which_fapl; + haddr_t maxaddr; + bool might_use_other_driver; }; /************************ @@ -776,11 +776,11 @@ test_vfd_open(void) false, }, }; - unsigned i = 0; + unsigned i = 0; unsigned failing_conditions_count = 10; - H5FD_t *fd = NULL; - hid_t fapl_hdfs = H5I_INVALID_HID; - hid_t fapl_unconfigured = H5I_INVALID_HID; + H5FD_t *fd = NULL; + hid_t fapl_hdfs = H5I_INVALID_HID; + hid_t fapl_unconfigured = H5I_INVALID_HID; TESTING("HDFS VFD-level open"); @@ -798,8 +798,8 @@ test_vfd_open(void) /* all the test cases that will _not_ open */ for (i = 0; i < failing_conditions_count; i++) { - struct test_condition T = failing_conditions[i]; - hid_t fapl_id = H5P_DEFAULT; + struct test_condition T = failing_conditions[i]; + hid_t fapl_id = H5P_DEFAULT; fd = NULL; @@ -934,7 +934,7 @@ test_eof_eoa(void) ************************/ H5FD_t *fd_shakespeare = NULL; - hid_t fapl_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; TESTING("HDFS eof/eoa gets and sets"); @@ -1031,10 +1031,10 @@ test_H5FDread_without_eoa_set_fails(void) #else - char buffer[HDFS_TEST_MAX_BUF_SIZE]; - unsigned int i = 0; - H5FD_t *file_shakespeare = NULL; - hid_t fapl_id = H5I_INVALID_HID; + char buffer[HDFS_TEST_MAX_BUF_SIZE]; + unsigned int i = 0; + H5FD_t *file_shakespeare = NULL; + hid_t fapl_id = H5I_INVALID_HID; TESTING("HDFS VFD read-eoa temporal coupling library limitation"); @@ -1139,10 +1139,10 @@ 
test_read(void) *************************/ struct testcase { const char *message; /* purpose of test case */ - haddr_t eoa_set; /* set file EOA to this prior to read */ - size_t addr; /* offset of read in file */ - size_t len; /* length of read in file */ - herr_t success; /* expected return value of read function */ + haddr_t eoa_set; /* set file EOA to this prior to read */ + size_t addr; /* offset of read in file */ + size_t len; /* length of read in file */ + herr_t success; /* expected return value of read function */ const char *expected; /* expected contents of buffer; failure ignores */ }; @@ -1199,14 +1199,14 @@ test_read(void) NULL, }, }; - unsigned testcase_count = 6; - unsigned test_i = 0; + unsigned testcase_count = 6; + unsigned test_i = 0; struct testcase test; - herr_t open_return = FAIL; - char buffer[HDFS_TEST_MAX_BUF_SIZE]; - unsigned int i = 0; - H5FD_t *file_raven = NULL; - hid_t fapl_id = H5I_INVALID_HID; + herr_t open_return = FAIL; + char buffer[HDFS_TEST_MAX_BUF_SIZE]; + unsigned int i = 0; + H5FD_t *file_raven = NULL; + hid_t fapl_id = H5I_INVALID_HID; TESTING("HDFS VFD read/range-gets"); @@ -1243,7 +1243,7 @@ test_read(void) * per-test setup * * -------------- */ - test = cases[test_i]; + test = cases[test_i]; open_return = FAIL; FAIL_IF(HDFS_TEST_MAX_BUF_SIZE < test.len) /* buffer too small! */ @@ -1349,8 +1349,8 @@ test_noops_and_autofails(void) * test-local variables * ************************/ - hid_t fapl_id = H5I_INVALID_HID; - H5FD_t *file = NULL; + hid_t fapl_id = H5I_INVALID_HID; + H5FD_t *file = NULL; const char data[36] = "The Force shall be with you, always"; TESTING("HDFS VFD always-fail and no-op routines"); @@ -1485,7 +1485,7 @@ test_H5F_integration(void) * test-local variables * ************************/ - hid_t file = H5I_INVALID_HID; + hid_t file = H5I_INVALID_HID; hid_t fapl_id = H5I_INVALID_HID; TESTING("HDFS file access through HD5F library (H5F API)"); diff --git a/test/objcopy.c b/test/objcopy.c index 798ba0c8123..88a709ad5ee 100644 --- a/test/objcopy.c +++ b/test/objcopy.c @@ -14,7 +14,7 @@ * Purpose: Test H5Ocopy(). */ -#include "testhdf5.h" +#include "h5test.h" #include "H5srcdir.h" #include "H5Iprivate.h" @@ -10063,14 +10063,14 @@ test_copy_dataset_compact_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, for (i = 0; i < DIM_SIZE_1; i++) { buf[i].p = malloc((i + 1) * sizeof(hvl_t)); if (buf[i].p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i); + fprintf(stderr, "Cannot allocate memory for VL data! i=%u\n", i); return 1; } /* end if */ buf[i].len = i + 1; for (tvl = (hvl_t *)buf[i].p, j = 0; j < (i + 1); j++, tvl++) { tvl->p = malloc((j + 1) * sizeof(unsigned int)); if (tvl->p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! i=%u, j=%u\n", i, j); + fprintf(stderr, "Cannot allocate memory for VL data! i=%u, j=%u\n", i, j); return 1; } /* end if */ tvl->len = j + 1; @@ -10260,14 +10260,14 @@ test_copy_dataset_contig_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, h for (i = 0; i < DIM_SIZE_1; i++) { buf[i].p = malloc((i + 1) * sizeof(hvl_t)); if (buf[i].p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i); + fprintf(stderr, "Cannot allocate memory for VL data! i=%u\n", i); TEST_ERROR; } /* end if */ buf[i].len = i + 1; for (tvl = (hvl_t *)buf[i].p, j = 0; j < (i + 1); j++, tvl++) { tvl->p = malloc((j + 1) * sizeof(unsigned int)); if (tvl->p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! 
i=%u, j=%u\n", i, j); + fprintf(stderr, "Cannot allocate memory for VL data! i=%u, j=%u\n", i, j); TEST_ERROR; } /* end if */ tvl->len = j + 1; @@ -10452,14 +10452,14 @@ test_copy_dataset_chunked_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, for (i = 0; i < DIM_SIZE_1; i++) { buf[i].p = malloc((i + 1) * sizeof(hvl_t)); if (buf[i].p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i); + fprintf(stderr, "Cannot allocate memory for VL data! i=%u\n", i); TEST_ERROR; } /* end if */ buf[i].len = i + 1; for (tvl = (hvl_t *)buf[i].p, j = 0; j < (i + 1); j++, tvl++) { tvl->p = malloc((j + 1) * sizeof(unsigned int)); if (tvl->p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! i=%u, j=%u\n", i, j); + fprintf(stderr, "Cannot allocate memory for VL data! i=%u, j=%u\n", i, j); TEST_ERROR; } /* end if */ tvl->len = j + 1; @@ -10693,14 +10693,14 @@ test_copy_dataset_compressed_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fap for (i = 0; i < DIM_SIZE_1; i++) { buf[i].p = malloc((i + 1) * sizeof(hvl_t)); if (buf[i].p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i); + fprintf(stderr, "Cannot allocate memory for VL data! i=%u\n", i); TEST_ERROR; } /* end if */ buf[i].len = i + 1; for (tvl = (hvl_t *)buf[i].p, j = 0; j < (i + 1); j++, tvl++) { tvl->p = malloc((j + 1) * sizeof(unsigned int)); if (tvl->p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! i=%u, j=%u\n", i, j); + fprintf(stderr, "Cannot allocate memory for VL data! i=%u, j=%u\n", i, j); TEST_ERROR; } /* end if */ tvl->len = j + 1; @@ -17136,9 +17136,6 @@ main(void) if (h5_driver_is_default_vfd_compatible(fapl, &driver_is_default_compatible) < 0) TEST_ERROR; - if (TestExpress > 0) - printf("***Express test mode %d. Some tests may be skipped\n", TestExpress); - /* Copy the file access property list */ if ((fapl2 = H5Pcopy(fapl)) < 0) TEST_ERROR; diff --git a/test/objcopy_ref.c b/test/objcopy_ref.c index 55dc23b8ea3..0aed2a0d010 100644 --- a/test/objcopy_ref.c +++ b/test/objcopy_ref.c @@ -14,7 +14,7 @@ * Purpose: Test H5Ocopy() for references. */ -#include "testhdf5.h" +#include "h5test.h" #define H5F_FRIEND /*suppress error about including H5Fpkg */ #define H5F_TESTING @@ -1823,9 +1823,6 @@ main(void) h5_test_init(); fapl = h5_fileaccess(); - if (TestExpress > 0) - printf("***Express test mode %d. 
Some tests may be skipped\n", TestExpress); - /* Copy the file access property list */ if ((fapl2 = H5Pcopy(fapl)) < 0) TEST_ERROR; diff --git a/test/select_io_dset.c b/test/select_io_dset.c index f9a4975eb50..c17d3b03a21 100644 --- a/test/select_io_dset.c +++ b/test/select_io_dset.c @@ -14,7 +14,7 @@ * Purpose: Tests selection IO for the dataset interface (H5D) */ -#include "testhdf5.h" +#include "h5test.h" #include "H5srcdir.h" const char *FILENAME[] = {"select_io", /* 0 */ diff --git a/test/set_extent.c b/test/set_extent.c index bee03140cb3..80cc57a6ceb 100644 --- a/test/set_extent.c +++ b/test/set_extent.c @@ -2316,7 +2316,7 @@ test_random_rank4(hid_t fapl, hid_t dcpl, hid_t dxpl, bool do_fillvalue, bool di } *wbuf = NULL; /* Write buffer */ struct { hsize_t arr[RAND4_NITER + 1][4]; - } *dim_log = NULL; /* Log of dataset dimensions */ + } *dim_log = NULL; /* Log of dataset dimensions */ bool zero_dim = false; /* Whether a dimension is 0 */ bool writing = true; /* Whether we're writing to the dset */ unsigned scalar_iter; /* Iteration to shrink dset to 1x1x1x1 */ @@ -2533,7 +2533,7 @@ test_random_rank4_vl(hid_t fapl, hid_t dcpl, hid_t dxpl, bool do_fillvalue, bool } *wbuf = NULL; /* Write buffer */ struct { hsize_t arr[RAND4_NITER + 1][4]; - } *dim_log = NULL; /* Log of dataset dimensions */ + } *dim_log = NULL; /* Log of dataset dimensions */ bool zero_dim = false; /* Whether a dimension is 0 */ bool writing = true; /* Whether we're writing to the dset */ hvl_t fill_value; /* Fill value */ diff --git a/test/stab.c b/test/stab.c index 3bae4d75046..c48474874c4 100644 --- a/test/stab.c +++ b/test/stab.c @@ -1466,10 +1466,9 @@ main(void) puts("All symbol table tests passed."); /* Cleanup */ - if (GetTestCleanup()) { - HDremove(FILE_OLD_GROUPS_COPY); - HDremove(CORRUPT_STAB_TMP_FILE); - } + HDremove(FILE_OLD_GROUPS_COPY); + HDremove(CORRUPT_STAB_TMP_FILE); + h5_cleanup(FILENAME, fapl); return 0; diff --git a/test/tarray.c b/test/tarray.c index 0f9e3e44faa..8f370cb4846 100644 --- a/test/tarray.c +++ b/test/tarray.c @@ -2217,7 +2217,7 @@ test_compat(void) *------------------------------------------------------------------------- */ void -test_array(void) +test_array(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing Array Datatypes\n")); @@ -2249,11 +2249,13 @@ test_array(void) *------------------------------------------------------------------------- */ void -cleanup_array(void) +cleanup_array(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } /* end cleanup_array() */ diff --git a/test/tattr.c b/test/tattr.c index e15ed5fb736..4831e1604b0 100644 --- a/test/tattr.c +++ b/test/tattr.c @@ -4009,7 +4009,16 @@ test_attr_big(hid_t fcpl, hid_t fapl) /* Create attribute */ u = 2; snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT); + + if (vol_is_native && low != H5F_LIBVER_LATEST) { + H5E_BEGIN_TRY + { + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY + } + else + attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT); if (low == H5F_LIBVER_LATEST) { CHECK(attr, FAIL, "H5Acreate2"); @@ -11928,7 +11937,7 @@ test_attr_delete_last_dense(hid_t fcpl, hid_t fapl) ** 
****************************************************************/ void -test_attr(void) +test_attr(const void H5_ATTR_UNUSED *params) { hid_t fapl = (H5I_INVALID_HID), fapl2 = (H5I_INVALID_HID); /* File access property lists */ hid_t fcpl = (H5I_INVALID_HID), fcpl2 = (H5I_INVALID_HID); /* File creation property lists */ @@ -12157,11 +12166,13 @@ test_attr(void) *------------------------------------------------------------------------- */ void -cleanup_attr(void) +cleanup_attr(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/tchecksum.c b/test/tchecksum.c index 62db33c1fba..5ee7c0f6d6e 100644 --- a/test/tchecksum.c +++ b/test/tchecksum.c @@ -216,7 +216,7 @@ test_chksum_large(void) ** ****************************************************************/ void -test_checksum(void) +test_checksum(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing checksum algorithms\n")); @@ -240,7 +240,7 @@ test_checksum(void) *------------------------------------------------------------------------- */ void -cleanup_checksum(void) +cleanup_checksum(void H5_ATTR_UNUSED *params) { /* no file to clean */ } diff --git a/test/tconfig.c b/test/tconfig.c index 25e9ad50efe..4cc796dcb43 100644 --- a/test/tconfig.c +++ b/test/tconfig.c @@ -59,7 +59,7 @@ void test_exit_definitions(void); *------------------------------------------------------------------------- */ void -test_configure(void) +test_configure(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing configure definitions\n")); @@ -77,7 +77,7 @@ test_configure(void) *------------------------------------------------------------------------- */ void -cleanup_configure(void) +cleanup_configure(void H5_ATTR_UNUSED *params) { /* no file to clean */ } diff --git a/test/tcoords.c b/test/tcoords.c index 1a3a8b3658d..c1d5c4083f2 100644 --- a/test/tcoords.c +++ b/test/tcoords.c @@ -386,7 +386,7 @@ test_multiple_ends(hid_t file, bool is_chunked) /* For testing the full selections in the fastest-growing end and in the middle dimensions */ struct { int arr[1][1][1][4][2][1][6][2]; - } *mem1_buffer = NULL; + } *mem1_buffer = NULL; hsize_t mem1_dims[8] = {1, 1, 1, 4, 2, 1, 6, 2}; hsize_t mem1_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; hsize_t mem1_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; @@ -396,7 +396,7 @@ test_multiple_ends(hid_t file, bool is_chunked) /* For testing the full selections in the slowest-growing end and in the middle dimensions */ struct { int arr[4][5][1][4][2][1][1][1]; - } *mem2_buffer = NULL; + } *mem2_buffer = NULL; hsize_t mem2_dims[8] = {4, 5, 1, 4, 2, 1, 1, 1}; hsize_t mem2_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; hsize_t mem2_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; @@ -406,7 +406,7 @@ test_multiple_ends(hid_t file, bool is_chunked) /* For testing two unadjacent full selections in the middle dimensions */ struct { int arr[1][5][3][1][1][3][6][1]; - } *mem3_buffer = NULL; + } *mem3_buffer = NULL; hsize_t mem3_dims[8] = {1, 5, 3, 1, 1, 3, 6, 1}; hsize_t mem3_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; hsize_t mem3_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; @@ -416,7 +416,7 @@ test_multiple_ends(hid_t file, bool is_chunked) /* For testing the full selections in the fastest-growing end and the slowest-growing end */ struct { int arr[4][5][1][1][1][1][6][2]; - } *mem4_buffer = NULL; + } *mem4_buffer = NULL; hsize_t 
mem4_dims[8] = {4, 5, 1, 1, 1, 1, 6, 2}; hsize_t mem4_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; hsize_t mem4_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; @@ -427,7 +427,7 @@ test_multiple_ends(hid_t file, bool is_chunked) * also in the middle dimensions */ struct { int arr[4][5][1][4][2][1][6][2]; - } *mem5_buffer = NULL; + } *mem5_buffer = NULL; hsize_t mem5_dims[8] = {4, 5, 1, 4, 2, 1, 6, 2}; hsize_t mem5_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; hsize_t mem5_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; @@ -678,7 +678,7 @@ test_multiple_ends(hid_t file, bool is_chunked) ** ****************************************************************/ void -test_coords(void) +test_coords(const void H5_ATTR_UNUSED *params) { hid_t fid; bool is_chunk[2] = {true, false}; @@ -713,11 +713,13 @@ test_coords(void) *------------------------------------------------------------------------- */ void -cleanup_coords(void) +cleanup_coords(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/testfiles/err_compat_1 b/test/testfiles/err_compat_1 index fa02bcd1039..3ed81d1f051 100644 --- a/test/testfiles/err_compat_1 +++ b/test/testfiles/err_compat_1 @@ -22,52 +22,64 @@ HDF5-DIAG: Error detected in HDF5 (version (number)): #002: (file name) line (number) in H5VL_setup_acc_args(): invalid location identifier major: Invalid arguments to routine minor: Inappropriate type - #003: (file name) line (number) in H5VL_vol_object(): invalid identifier type to function + #003: (file name) line (number) in H5VL_vol_object(): can't retrieve object for ID + major: Virtual Object Layer + minor: Can't get value + #004: (file name) line (number) in H5VL_vol_object_verify(): invalid identifier type to function major: Invalid arguments to routine minor: Inappropriate type ********* Print error stack in customized way ********* - error #000: (file name) in H5VL_vol_object(): line (number) + error #000: (file name) in H5VL_vol_object_verify(): line (number) major: Invalid arguments to routine minor: Inappropriate type - error #001: (file name) in H5VL_setup_acc_args(): line (number) + error #001: (file name) in H5VL_vol_object(): line (number) + major: Virtual Object Layer + minor: Can't get value + error #002: (file name) in H5VL_setup_acc_args(): line (number) major: Invalid arguments to routine minor: Inappropriate type - error #002: (file name) in H5D__create_api_common(): line (number) + error #003: (file name) in H5D__create_api_common(): line (number) major: Dataset minor: Can't set value - error #003: (file name) in H5Dcreate2(): line (number) + error #004: (file name) in H5Dcreate2(): line (number) major: Dataset minor: Unable to create file ********* Print error stack in customized way ********* - error #000: (file name) in H5VL_vol_object(): line (number) + error #000: (file name) in H5VL_vol_object_verify(): line (number) major: Invalid arguments to routine minor: Inappropriate type - error #001: (file name) in H5VL_setup_acc_args(): line (number) + error #001: (file name) in H5VL_vol_object(): line (number) + major: Virtual Object Layer + minor: Can't get value + error #002: (file name) in H5VL_setup_acc_args(): line (number) major: Invalid arguments to routine minor: Inappropriate type - error #002: (file name) in H5D__create_api_common(): line (number) + error #003: (file name) in H5D__create_api_common(): line (number) major: Dataset minor: Can't set value - error #003: (file name) 
in H5Dcreate2(): line (number) + error #004: (file name) in H5Dcreate2(): line (number) major: Dataset minor: Unable to create file - error #004: (file name) in H5Eget_auto(1 or 2)(): line (number) + error #005: (file name) in H5Eget_auto(1 or 2)(): line (number) major: Error API minor: Can't get value ********* Print error stack in customized way ********* - error #000: (file name) in H5VL_vol_object(): line (number) + error #000: (file name) in H5VL_vol_object_verify(): line (number) major: Invalid arguments to routine minor: Inappropriate type - error #001: (file name) in H5VL_setup_acc_args(): line (number) + error #001: (file name) in H5VL_vol_object(): line (number) + major: Virtual Object Layer + minor: Can't get value + error #002: (file name) in H5VL_setup_acc_args(): line (number) major: Invalid arguments to routine minor: Inappropriate type - error #002: (file name) in H5D__create_api_common(): line (number) + error #003: (file name) in H5D__create_api_common(): line (number) major: Dataset minor: Can't set value - error #003: (file name) in H5Dcreate2(): line (number) + error #004: (file name) in H5Dcreate2(): line (number) major: Dataset minor: Unable to create file HDF5-DIAG: Error detected in HDF5 (version (number)): @@ -80,7 +92,10 @@ HDF5-DIAG: Error detected in HDF5 (version (number)): #002: (file name) line (number) in H5VL_setup_acc_args(): invalid location identifier major: Invalid arguments to routine minor: Inappropriate type - #003: (file name) line (number) in H5VL_vol_object(): invalid identifier type to function + #003: (file name) line (number) in H5VL_vol_object(): can't retrieve object for ID + major: Virtual Object Layer + minor: Can't get value + #004: (file name) line (number) in H5VL_vol_object_verify(): invalid identifier type to function major: Invalid arguments to routine minor: Inappropriate type HDF5-DIAG: Error detected in HDF5 (version (number)): diff --git a/test/testframe.c b/test/testframe.c index 50daede8eea..a630a179b9e 100644 --- a/test/testframe.c +++ b/test/testframe.c @@ -11,276 +11,379 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: Provides support functions for the testing framework. - * + * Purpose: Implements a basic testing framework for HDF5 tests to use. */ -#include "testhdf5.h" +#include "testframe.h" +#include "h5test.h" /* * Definitions for the testing structure. */ -#define MAXTESTNAME 16 -#define MAXTESTDESC 64 typedef struct TestStruct { - int NumErrors; - char Description[MAXTESTDESC]; - int SkipFlag; char Name[MAXTESTNAME]; - void (*Call)(void); - void (*Cleanup)(void); - const void *Parameters; + char Description[MAXTESTDESC]; + void (*TestFunc)(const void *); + void (*TestSetupFunc)(void *); + void (*TestCleanupFunc)(void *); + void *TestParameters; + int TestNumErrors; + int TestSkipFlag; } TestStruct; /* - * Variables used by testing framework. + * Global variables used by testing framework. */ -static int enable_error_stack = 0; /* enable error stack; disable=0 enable=1 */ -static int num_errs = 0; /* Total number of errors during testing */ -int TestVerbosity = VERBO_DEF; /* Default Verbosity is Low */ -static int Summary = 0; /* Show test summary. Default is no. */ -static int CleanUp = 1; /* Do cleanup or not. Default is yes. */ -int TestExpress = -1; /* Do TestExpress or not. -1 means not set yet. 
*/ -static TestStruct *Test = NULL; /* Array of tests */ -static unsigned TestAlloc = 0; /* Size of the Test array */ -static unsigned Index = 0; -static const void *Test_parameters = NULL; -static const char *TestProgName = NULL; -static void (*TestPrivateUsage)(void) = NULL; -static int (*TestPrivateParser)(int ac, char *av[]) = NULL; -int mpi_rank_framework_g = 0; + +static TestStruct *TestArray = NULL; /* Array of tests */ +static unsigned TestAlloc = 0; /* Size of the Test array */ +static unsigned TestCount = 0; /* Number of tests currently added to test array */ + +static const char *TestProgName = NULL; +static void (*TestPrivateUsage_g)(FILE *stream) = NULL; +static herr_t (*TestPrivateParser_g)(int argc, char *argv[]) = NULL; +static herr_t (*TestCleanupFunc_g)(void) = NULL; + +static int TestNumErrs_g = 0; /* Total number of errors that occurred for whole test program */ +static bool TestEnableErrorStack = true; /* Whether to show error stacks from the library */ + +static int TestMaxNumThreads_g = -1; /* Max number of threads that can be spawned */ + +static bool TestDoSummary_g = false; /* Show test summary. Default is no. */ +static bool TestDoCleanUp_g = true; /* Do cleanup or not. Default is yes. */ + +int TestFrameworkProcessID_g = 0; /* MPI process rank value for parallel tests */ +int TestVerbosity_g = VERBO_DEF; /* Default Verbosity is Low */ /* - * Setup a test function and add it to the list of tests. - * It must have no parameters and returns void. - * TheName--short test name. - * If the name starts with '-', do not run it by default. - * TheCall--the test routine. - * Cleanup--the cleanup routine for the test. - * TheDescr--Long description of the test. - * Parameters--pointer to extra parameters. Use NULL if none used. - * Since only the pointer is copied, the contents should not change. - * Return: Void - * exit EXIT_FAILURE if error is encountered. 
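The rewritten AddTest() that follows accepts optional per-test setup and cleanup callbacks and makes a private copy of any caller-supplied parameter block (TestData/TestDataSize), which TestShutdown() later frees. A minimal registration sketch under that contract; the module, callback names, and parameter struct are hypothetical, and the framework is assumed to have already been initialized with TestInit():

#include "testframe.h"

/* Hypothetical per-test parameters. AddTest() copies TestDataSize bytes,
 * so a stack-local struct is safe to pass at registration time. */
struct example_params {
    unsigned dim;
};

static void
setup_example(void *params)
{
    struct example_params *p = params;

    MESSAGE(5, ("setting up example test, dim = %u\n", p->dim));
}

static void
test_example(const void *params)
{
    const struct example_params *p = params;

    if (p->dim == 0)
        TestErrPrintf("unexpected dimension size\n"); /* bumps the framework's error count */
}

static void
cleanup_example(void H5_ATTR_UNUSED *params)
{
    if (GetTestCleanup()) {
        /* remove temporary files here; skipped under -cleanoff / HDF5_NOCLEANUP */
    }
}

static herr_t
register_example_test(void)
{
    struct example_params p = {16};

    /* A leading '-' in the name would register the test as skipped by default */
    return AddTest("example", test_example, setup_example, cleanup_example, &p, sizeof(p),
                   "hypothetical example test");
}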
+ * Add a new test to the list of tests to be executed */ -void -AddTest(const char *TheName, void (*TheCall)(void), void (*Cleanup)(void), const char *TheDescr, - const void *Parameters) +herr_t +AddTest(const char *TestName, void (*TestFunc)(const void *), void (*TestSetupFunc)(void *), + void (*TestCleanupFunc)(void *), const void *TestData, size_t TestDataSize, const char *TestDescr) { - /* Sanity checking */ - if (strlen(TheDescr) >= MAXTESTDESC) { - printf("Test description ('%s') too long, increase MAXTESTDESC(%d).\n", TheDescr, MAXTESTDESC); - exit(EXIT_FAILURE); + void *new_test_data = NULL; + + if (*TestName == '\0') { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: empty string given for test name\n", __func__); + return FAIL; } - if (strlen(TheName) >= MAXTESTNAME) { - printf("Test name too long, increase MAXTESTNAME(%d).\n", MAXTESTNAME); - exit(EXIT_FAILURE); + if (strlen(TestName) >= MAXTESTNAME) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: test name ('%s') too long, increase MAXTESTNAME(%d).\n", __func__, TestName, + MAXTESTNAME); + return FAIL; + } + if (strlen(TestDescr) >= MAXTESTDESC) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: test description ('%s') too long, increase MAXTESTDESC(%d).\n", __func__, + TestDescr, MAXTESTDESC); + return FAIL; + } + if ((TestData && (0 == TestDataSize)) || (!TestData && (0 != TestDataSize))) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: invalid test data size (%zu)\n", __func__, TestDataSize); + return FAIL; } - /* Check for increasing the Test array size */ - if (Index >= TestAlloc) { - TestStruct *newTest = Test; /* New array of tests */ - unsigned newAlloc = MAX(1, TestAlloc * 2); /* New array size */ - - /* Reallocate array */ - if (NULL == (newTest = (TestStruct *)realloc(Test, newAlloc * sizeof(TestStruct)))) { - printf("Out of memory for tests, Index = %u, TestAlloc = %u, newAlloc = %u\n", Index, TestAlloc, - newAlloc); - exit(EXIT_FAILURE); + /* Re-allocate test array if necessary */ + if (TestCount >= TestAlloc) { + TestStruct *newTest = TestArray; + unsigned newAlloc = MAX(1, TestAlloc * 2); + + if (NULL == (newTest = realloc(TestArray, newAlloc * sizeof(TestStruct)))) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, + "%s: couldn't reallocate test array, TestCount = %u, TestAlloc = %u, newAlloc = %u\n", + __func__, TestCount, TestAlloc, newAlloc); + return FAIL; } - /* Update info */ - Test = newTest; + TestArray = newTest; TestAlloc = newAlloc; } - /* Set up test function */ - strcpy(Test[Index].Description, TheDescr); - if (*TheName != '-') { - strcpy(Test[Index].Name, TheName); - Test[Index].SkipFlag = 0; + /* If the test name begins with '-', skip the test by default */ + if (*TestName == '-') { + TestArray[TestCount].TestSkipFlag = 1; + TestName++; } - else { /* skip test by default */ - strcpy(Test[Index].Name, TheName + 1); - Test[Index].SkipFlag = 1; + else + TestArray[TestCount].TestSkipFlag = 0; + + strcpy(TestArray[TestCount].Name, TestName); + strcpy(TestArray[TestCount].Description, TestDescr); + + /* Make a copy of the additional test data given */ + if (TestData) { + if (NULL == (new_test_data = malloc(TestDataSize))) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: couldn't allocate space for additional test data\n", __func__); + return FAIL; + } + + memcpy(new_test_data, TestData, TestDataSize); } - Test[Index].Call = TheCall; - Test[Index].Cleanup = Cleanup; - Test[Index].NumErrors = -1; - Test[Index].Parameters = Parameters; + 
TestArray[TestCount].TestParameters = new_test_data; + + TestArray[TestCount].TestFunc = TestFunc; + TestArray[TestCount].TestSetupFunc = TestSetupFunc; + TestArray[TestCount].TestCleanupFunc = TestCleanupFunc; + TestArray[TestCount].TestNumErrors = -1; - /* Increment test count */ - Index++; + TestCount++; + + return SUCCEED; } /* - * Initialize testing framework - * - * ProgName: Name of test program. - * private_usage: Optional routine provided by test program to print the - * private portion of usage page. Default to NULL which means none is - * provided. - * private_parser: Optional routine provided by test program to parse the - * private options. Default to NULL which means none is provided. - * + * Initialize the testing framework */ -void -TestInit(const char *ProgName, void (*private_usage)(void), int (*private_parser)(int ac, char *av[])) +herr_t +TestInit(const char *ProgName, void (*TestPrivateUsage)(FILE *stream), + herr_t (*TestPrivateParser)(int argc, char *argv[]), herr_t (*TestSetupFunc)(void), + herr_t (*TestCleanupFunc)(void), int TestProcessID) { - /* - * Turn off automatic error reporting since we do it ourselves. Besides, - * half the functions this test calls are private, so automatic error - * reporting wouldn't do much good since it's triggered at the API layer. - */ - if (enable_error_stack == 0) - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); + /* Turn off automatic error reporting if requested */ + if (!TestEnableErrorStack) { + if (H5Eset_auto2(H5E_DEFAULT, NULL, NULL) < 0) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: can't disable error stack\n", __func__); + return FAIL; + } + } - /* - * Record the program name and private routines if provided. - */ + /* Initialize value for TestExpress functionality */ + h5_get_testexpress(); + + /* Record the program name and private routines if provided. */ TestProgName = ProgName; - if (NULL != private_usage) - TestPrivateUsage = private_usage; - if (NULL != private_parser) - TestPrivateParser = private_parser; + if (NULL != TestPrivateUsage) + TestPrivateUsage_g = TestPrivateUsage; + if (NULL != TestPrivateParser) + TestPrivateParser_g = TestPrivateParser; + TestCleanupFunc_g = TestCleanupFunc; + + /* Set process ID for later use */ + TestFrameworkProcessID_g = TestProcessID; + + /* Set/reset global variables from h5test that may be used by + * tests integrated with the testing framework + */ + n_tests_run_g = 0; + n_tests_passed_g = 0; + n_tests_failed_g = 0; + n_tests_skipped_g = 0; + + /* Call test framework setup callback if provided */ + if (TestSetupFunc && TestSetupFunc() < 0) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: error occurred in test framework initialization callback\n", __func__); + return FAIL; + } + + return SUCCEED; } /* - * Print test usage. - * First print the common test options, then the extra options if provided. + * Print out test program usage help text */ void -TestUsage(void) +TestUsage(FILE *stream) { - unsigned i; - - if (mpi_rank_framework_g == 0) { - print_func("Usage: %s [-v[erbose] (l[ow]|m[edium]|h[igh]|0-9)] %s\n", TestProgName, - (TestPrivateUsage ? 
"" : "")); - print_func(" [-[e]x[clude] name]+ \n"); - print_func(" [-o[nly] name]+ \n"); - print_func(" [-b[egin] name] \n"); - print_func(" [-s[ummary]] \n"); - print_func(" [-c[leanoff]] \n"); - print_func(" [-h[elp]] \n"); - print_func("\n\n"); - print_func("verbose controls the amount of information displayed\n"); - print_func("exclude to exclude tests by name\n"); - print_func("only to name tests which should be run\n"); - print_func("begin start at the name of the test given\n"); - print_func("summary prints a summary of test results at the end\n"); - print_func("cleanoff does not delete *.hdf files after execution of tests\n"); - print_func("help print out this information\n"); - if (TestPrivateUsage) { - print_func("\nExtra options\n"); - TestPrivateUsage(); - } - print_func("\n\n"); - print_func("This program currently tests the following: \n\n"); - print_func("%16s %s\n", "Name", "Description"); - print_func("%16s %s\n", "----", "-----------"); + size_t max_test_name_len = 0; + + /* If running in parallel, only print output from a single MPI process */ + if (TestFrameworkProcessID_g != 0) + return; + + if (!stream) + stream = stdout; + + fprintf(stream, "Usage: %s [-v[erbose] (l[ow]|m[edium]|h[igh]|0-9)] %s\n", TestProgName, + (TestPrivateUsage_g ? "" : "")); + fprintf(stream, " [-[e]x[clude] name]+ \n"); + fprintf(stream, " [-o[nly] name]+ \n"); + fprintf(stream, " [-b[egin] name] \n"); + fprintf(stream, " [-[max]t[hreads]] \n"); + fprintf(stream, " [-s[ummary]] \n"); + fprintf(stream, " [-c[leanoff]] \n"); + fprintf(stream, " [-h[elp]] \n"); + fprintf(stream, "\n\n"); + fprintf(stream, "verbose controls the amount of information displayed\n"); + fprintf(stream, "exclude to exclude tests by name\n"); + fprintf(stream, "only to name tests which should be run\n"); + fprintf(stream, "begin start at the name of the test given\n"); + fprintf(stream, "maxthreads maximum number of threads to be used by multi-thread tests\n"); + fprintf(stream, "summary prints a summary of test results at the end\n"); + fprintf(stream, "cleanoff does not delete *.hdf files after execution of tests\n"); + fprintf(stream, "help print out this information\n"); + if (TestPrivateUsage_g) { + fprintf(stream, "\nExtra options\n"); + TestPrivateUsage_g(stream); + } + fprintf(stream, "\n\n"); - for (i = 0; i < Index; i++) - print_func("%16s %s\n", Test[i].Name, Test[i].Description); + /* Collect some information for cleaner printing */ + for (unsigned Loop = 0; Loop < TestCount; Loop++) { + size_t test_name_len = strlen(TestArray[Loop].Name); - print_func("\n\n"); + if (test_name_len > max_test_name_len) + max_test_name_len = test_name_len; } + + fprintf(stream, "This program currently tests the following: \n\n"); + fprintf(stream, "%*s %s\n", (int)max_test_name_len, "Name", " Description"); + fprintf(stream, "%*s %s\n", (int)max_test_name_len, "----", " -----------"); + + for (unsigned i = 0; i < TestCount; i++) + fprintf(stream, "%*s %s\n", (int)max_test_name_len, TestArray[i].Name, TestArray[i].Description); + + fprintf(stream, "\n\n"); } /* - * Print test info. 
+ * Print out miscellaneous test program information */ void -TestInfo(const char *ProgName) +TestInfo(FILE *stream) { - if (mpi_rank_framework_g == 0) { - unsigned major, minor, release; + unsigned major, minor, release; - H5get_libversion(&major, &minor, &release); + /* If running in parallel, only print output from a single MPI process */ + if (TestFrameworkProcessID_g != 0) + return; - print_func("\nFor help use: %s -help\n", ProgName); - print_func("Linked with hdf5 version %u.%u release %u\n", major, minor, release); - } + if (!stream) + stream = stdout; + + H5get_libversion(&major, &minor, &release); + + fprintf(stream, "\nFor help use: %s -help\n", TestProgName); + fprintf(stream, "Linked with hdf5 version %u.%u release %u\n", major, minor, release); } /* - * Parse command line information. - * argc, argv: the usual command line argument count and strings - * - * Return: Void - * exit EXIT_FAILURE if error is encountered. + * Parse command line information */ -void +herr_t TestParseCmdLine(int argc, char *argv[]) { - bool skipped_all = false; - int ret_code; + herr_t ret_value = SUCCEED; while ((void)argv++, --argc > 0) { if ((strcmp(*argv, "-verbose") == 0) || (strcmp(*argv, "-v") == 0)) { if (argc > 0) { --argc; ++argv; - ParseTestVerbosity(*argv); + + if (ParseTestVerbosity(*argv) < 0) { + ret_value = FAIL; + goto done; + } } else { - TestUsage(); - exit(EXIT_FAILURE); + ret_value = FAIL; + goto done; } } else if (((strcmp(*argv, "-exclude") == 0) || (strcmp(*argv, "-x") == 0))) { if (argc > 0) { --argc; ++argv; - SetTest(*argv, SKIPTEST); + + if (SetTest(*argv, SKIPTEST) < 0) { + ret_value = FAIL; + goto done; + } } else { - TestUsage(); - exit(EXIT_FAILURE); + ret_value = FAIL; + goto done; } } else if (((strcmp(*argv, "-begin") == 0) || (strcmp(*argv, "-b") == 0))) { if (argc > 0) { --argc; ++argv; - SetTest(*argv, BEGINTEST); + + if (SetTest(*argv, BEGINTEST) < 0) { + ret_value = FAIL; + goto done; + } } else { - TestUsage(); - exit(EXIT_FAILURE); + ret_value = FAIL; + goto done; } } else if (((strcmp(*argv, "-only") == 0) || (strcmp(*argv, "-o") == 0))) { if (argc > 0) { - unsigned Loop; - --argc; ++argv; - /* Skip all tests, then activate only one. 
*/ - if (!skipped_all) { - for (Loop = 0; Loop < Index; Loop++) - Test[Loop].SkipFlag = 1; - skipped_all = true; - } /* end if */ - SetTest(*argv, ONLYTEST); + if (SetTest(*argv, ONLYTEST) < 0) { + ret_value = FAIL; + goto done; + } } else { - TestUsage(); - exit(EXIT_FAILURE); + ret_value = FAIL; + goto done; } } else if ((strcmp(*argv, "-summary") == 0) || (strcmp(*argv, "-s") == 0)) - Summary = 1; - else if (strcmp(*argv, "-enable-error-stack") == 0) - enable_error_stack = 1; + TestDoSummary_g = true; + else if (strcmp(*argv, "-disable-error-stack") == 0) { + TestEnableErrorStack = false; + } else if ((strcmp(*argv, "-help") == 0) || (strcmp(*argv, "-h") == 0)) { - TestUsage(); + TestUsage(stdout); exit(EXIT_SUCCESS); } else if ((strcmp(*argv, "-cleanoff") == 0) || (strcmp(*argv, "-c") == 0)) SetTestNoCleanup(); + else if ((strcmp(*argv, "-maxthreads") == 0) || (strcmp(*argv, "-t") == 0)) { + if (argc > 0) { + long max_threads; + + --argc; + ++argv; + + errno = 0; + max_threads = strtol(*argv, NULL, 10); + if (errno != 0) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, + "error while parsing value (%s) specified for maximum number of threads\n", + *argv); + ret_value = FAIL; + goto done; + } + if (max_threads <= 0) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "invalid value (%ld) specified for maximum number of threads\n", + max_threads); + ret_value = FAIL; + goto done; + } + else if (max_threads > (long)INT_MAX) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "value (%ld) specified for maximum number of threads too large\n", + max_threads); + ret_value = FAIL; + goto done; + } + + SetTestMaxNumThreads((int)max_threads); + } + else { + ret_value = FAIL; + goto done; + } + } else { /* non-standard option. Break out. */ break; @@ -288,101 +391,150 @@ TestParseCmdLine(int argc, char *argv[]) } /* Call extra parsing function if provided. */ - if (NULL != TestPrivateParser) { - ret_code = TestPrivateParser(argc + 1, argv - 1); - if (ret_code != 0) - exit(EXIT_FAILURE); + if (NULL != TestPrivateParser_g) { + if (TestPrivateParser_g(argc + 1, argv - 1) < 0) { + ret_value = FAIL; + goto done; + } } + +done: + if (ret_value < 0) + TestUsage(stderr); + + return ret_value; } /* - * Perform Tests. + * Execute all tests that aren't being skipped */ -void +herr_t PerformTests(void) { - unsigned Loop; + for (unsigned Loop = 0; Loop < TestCount; Loop++) { + int old_num_errs = TestNumErrs_g; - for (Loop = 0; Loop < Index; Loop++) - if (Test[Loop].SkipFlag) { - if (mpi_rank_framework_g == 0) - MESSAGE(2, ("Skipping -- %s (%s) \n", Test[Loop].Description, Test[Loop].Name)); - } - else { - if (mpi_rank_framework_g == 0) - MESSAGE(2, ("Testing -- %s (%s) \n", Test[Loop].Description, Test[Loop].Name)); - if (mpi_rank_framework_g == 0) - MESSAGE(5, ("===============================================\n")); - Test[Loop].NumErrors = num_errs; - Test_parameters = Test[Loop].Parameters; - TestAlarmOn(); - Test[Loop].Call(); - TestAlarmOff(); - Test[Loop].NumErrors = num_errs - Test[Loop].NumErrors; - if (mpi_rank_framework_g == 0) { - MESSAGE(5, ("===============================================\n")); - MESSAGE(5, ("There were %d errors detected.\n\n", (int)Test[Loop].NumErrors)); - } + if (TestArray[Loop].TestSkipFlag) { + MESSAGE(2, ("Skipping -- %s (%s) \n", TestArray[Loop].Description, TestArray[Loop].Name)); + continue; } - Test_parameters = NULL; /* clear it. 
*/ + MESSAGE(2, ("Testing -- %s (%s) \n", TestArray[Loop].Description, TestArray[Loop].Name)); + MESSAGE(5, ("===============================================\n")); - if (mpi_rank_framework_g == 0) { - MESSAGE(2, ("\n\n")); - if (num_errs) - print_func("!!! %d Error(s) were detected !!!\n\n", (int)num_errs); - else - print_func("All tests were successful. \n\n"); - } -} + if (TestAlarmOn() < 0) + MESSAGE(5, ("Couldn't enable test alarm timer for test -- %s (%s) \n", + TestArray[Loop].Description, TestArray[Loop].Name)); -/* - * Display test summary. - */ -void -TestSummary(void) -{ - unsigned Loop; + if (TestArray[Loop].TestSetupFunc) + TestArray[Loop].TestSetupFunc(TestArray[Loop].TestParameters); - print_func("Summary of Test Results:\n"); - print_func("Name of Test Errors Description of Test\n"); - print_func("---------------- ------ --------------------------------------\n"); + TestArray[Loop].TestFunc(TestArray[Loop].TestParameters); - for (Loop = 0; Loop < Index; Loop++) { - if (Test[Loop].NumErrors == -1) - print_func("%16s %6s %s\n", Test[Loop].Name, "N/A", Test[Loop].Description); - else - print_func("%16s %6d %s\n", Test[Loop].Name, (int)Test[Loop].NumErrors, Test[Loop].Description); + if (TestArray[Loop].TestCleanupFunc) + TestArray[Loop].TestCleanupFunc(TestArray[Loop].TestParameters); + + TestAlarmOff(); + + TestArray[Loop].TestNumErrors = TestNumErrs_g - old_num_errs; + + MESSAGE(5, ("===============================================\n")); + MESSAGE(5, ("There were %d errors detected.\n\n", TestArray[Loop].TestNumErrors)); } - print_func("\n\n"); + MESSAGE(2, ("\n\n")); + if (TestNumErrs_g) + MESSAGE(VERBO_NONE, ("!!! %d Error(s) were detected !!!\n\n", TestNumErrs_g)); + else + MESSAGE(VERBO_NONE, ("All tests were successful. \n\n")); + + return SUCCEED; } /* - * Cleanup files from testing + * Display a summary of running tests */ void -TestCleanup(void) +TestSummary(FILE *stream) { - unsigned Loop; + size_t max_test_name_len = 0; + size_t max_test_desc_len = 0; + size_t test_name_header_len = 0; + size_t test_desc_header_len = 0; + + /* If running in parallel, only print output from a single MPI process */ + if (TestFrameworkProcessID_g != 0) + return; + + if (!stream) + stream = stdout; + + /* Collect some information for cleaner printing */ + for (unsigned Loop = 0; Loop < TestCount; Loop++) { + size_t test_name_len = strlen(TestArray[Loop].Name); + size_t test_desc_len = strlen(TestArray[Loop].Description); + + if (test_name_len > max_test_name_len) + max_test_name_len = test_name_len; + if (test_desc_len > max_test_desc_len) + max_test_desc_len = test_desc_len; + } - if (mpi_rank_framework_g == 0) - MESSAGE(2, ("\nCleaning Up temp files...\n\n")); + test_name_header_len = MAX(max_test_name_len, strlen("Name of Test")); + test_desc_header_len = MAX(max_test_desc_len, strlen("Description of Test")); - /* call individual cleanup routines in each source module */ - for (Loop = 0; Loop < Index; Loop++) - if (!Test[Loop].SkipFlag && Test[Loop].Cleanup != NULL) - Test[Loop].Cleanup(); + /* Print header, adjusted to maximum test name and description lengths */ + fprintf(stream, "Summary of Test Results:\n"); + fprintf(stream, "%-*s Errors %-*s\n", (int)test_name_header_len, "Name of Test", + (int)test_desc_header_len, "Description of Test"); + + /* Print a separating line row for each column header, adjusted to maximum + * test name and description lengths + */ + for (size_t i = 0; i < test_name_header_len; i++) /* 'Name of Test' */ + putc('-', stream); + putc(' ', stream); + 
putc(' ', stream); + for (size_t i = 0; i < 6; i++) /* 'Errors' */ + putc('-', stream); + putc(' ', stream); + putc(' ', stream); + for (size_t i = 0; i < test_desc_header_len; i++) /* 'Description of Test' */ + putc('-', stream); + putc('\n', stream); + + for (unsigned Loop = 0; Loop < TestCount; Loop++) { + if (TestArray[Loop].TestNumErrors == -1) + fprintf(stream, "%-*s %-6s %-*s\n", (int)test_name_header_len, TestArray[Loop].Name, "N/A", + (int)test_desc_header_len, TestArray[Loop].Description); + else + fprintf(stream, "%-*s %-6d %-*s\n", (int)test_name_header_len, TestArray[Loop].Name, + TestArray[Loop].TestNumErrors, (int)test_desc_header_len, TestArray[Loop].Description); + } + + fprintf(stream, "\n\n"); } /* * Shutdown the test infrastructure */ -void +herr_t TestShutdown(void) { - if (Test) - free(Test); + /* Clean up test state first before tearing down testing framework */ + if (TestCleanupFunc_g && TestCleanupFunc_g() < 0) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: error occurred in test framework initialization callback\n", __func__); + return FAIL; + } + + if (TestArray) + for (unsigned Loop = 0; Loop < TestCount; Loop++) + free(TestArray[Loop].TestParameters); + + free(TestArray); + + return SUCCEED; } /* @@ -391,140 +543,83 @@ TestShutdown(void) H5_ATTR_PURE int GetTestVerbosity(void) { - return (TestVerbosity); + return TestVerbosity_g; } /* - * Set the verbosity level for the testing framework. - * Return previous verbosity level. + * Set the verbosity level for the testing framework */ int SetTestVerbosity(int newval) { int oldval; - oldval = TestVerbosity; - TestVerbosity = newval; - return (oldval); + if (newval < 0) + newval = VERBO_NONE; + else if (newval > VERBO_HI) + newval = VERBO_HI; + + oldval = TestVerbosity_g; + TestVerbosity_g = newval; + + return oldval; } /* * Retrieve the TestExpress mode for the testing framework - Values: - 0: Exhaustive run - Tests should take as long as necessary - 1: Full run. Default if H5_TEST_EXPRESS_LEVEL_DEFAULT - and HDF5TestExpress are not defined - Tests should take no more than 30 minutes - 2: Quick run - Tests should take no more than 10 minutes - 3: Smoke test. - Default if HDF5TestExpress is set to a value other than 0-3 - Tests should take less than 1 minute - - Design: - If the environment variable $HDF5TestExpress is defined, - or if a default testing level > 1 has been set via - H5_TEST_EXPRESS_LEVEL_DEFAULT, then test programs should - skip some tests so that they - complete sooner. - - Terms: - A "test" is a single executable, even if it contains multiple - sub-tests. - The standard system for test times is a Linux machine running in - NFS space (to catch tests that involve a great deal of disk I/O). - - Implementation: - I think this can be easily implemented in the test library (libh5test.a) - so that all tests can just call it to check the status of $HDF5TestExpress. */ int GetTestExpress(void) { - char *env_val; - - /* set it here for now. Should be done in something like h5test_init(). 
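GetTestExpress() and SetTestExpress() now simply defer to h5_get_testexpress() and h5_set_testexpress() from the h5test library (next hunk). A test body that wants to honor the express level can branch on the returned value; the test name, iteration counts, and the threshold chosen below are illustrative only:

#include "testframe.h"

static void
test_big_io(const void H5_ATTR_UNUSED *params)
{
    int      express = GetTestExpress();
    unsigned n_iters = (express >= 2) ? 10 : 10000; /* trim the workload for quick/smoke runs */

    for (unsigned u = 0; u < n_iters; u++) {
        /* ... exercise the I/O path under test ... */
    }

    if (express == 0)
        MESSAGE(5, ("exhaustive mode: running the long-haul variant as well\n"));
}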
*/ - if (TestExpress == -1) { - int express_val = 1; - - /* Check if a default test express level is defined (e.g., by build system) */ -#ifdef H5_TEST_EXPRESS_LEVEL_DEFAULT - express_val = H5_TEST_EXPRESS_LEVEL_DEFAULT; -#endif - - /* Check if HDF5TestExpress is set to override the default level */ - env_val = getenv("HDF5TestExpress"); - if (env_val) { - if (strcmp(env_val, "0") == 0) - express_val = 0; - else if (strcmp(env_val, "1") == 0) - express_val = 1; - else if (strcmp(env_val, "2") == 0) - express_val = 2; - else - express_val = 3; - } - - SetTestExpress(express_val); - } - - return (TestExpress); + return h5_get_testexpress(); } /* * Set the TestExpress mode for the testing framework. - * Return previous TestExpress mode. - * Values: non-zero means TestExpress mode is on, 0 means off. */ -int +void SetTestExpress(int newval) { - int oldval; - - oldval = TestExpress; - TestExpress = newval; - return (oldval); + h5_set_testexpress(newval); } /* - * Retrieve Summary request value. - * 0 means no summary, 1 means yes. + * Retrieve test summary request value. */ -H5_ATTR_PURE int +H5_ATTR_PURE bool GetTestSummary(void) { - return (Summary); + return TestDoSummary_g; } /* - * Retrieve Cleanup request value. - * 0 means no Cleanup, 1 means yes. + * Retrieve test file cleanup status value */ -H5_ATTR_PURE int +H5_ATTR_PURE bool GetTestCleanup(void) { - return (CleanUp); + /* Don't cleanup files if the HDF5_NOCLEANUP environment + * variable is defined to anything + */ + if (getenv(HDF5_NOCLEANUP)) + SetTestNoCleanup(); + + return TestDoCleanUp_g; } /* - * Set cleanup to no. - * Return previous cleanup value. + * Set test file cleanup status to "don't clean up temporary files" */ -int +void SetTestNoCleanup(void) { - int oldval; - - oldval = CleanUp; - CleanUp = 0; - return (oldval); + TestDoCleanUp_g = false; } /* * Parse an argument string for verbosity level and set it. */ -void +herr_t ParseTestVerbosity(char *argv) { if (*argv == 'l') @@ -533,8 +628,27 @@ ParseTestVerbosity(char *argv) SetTestVerbosity(VERBO_MED); else if (*argv == 'h') SetTestVerbosity(VERBO_HI); - else - SetTestVerbosity(atoi(argv)); + else { + long verb_level; + + errno = 0; + verb_level = strtol(argv, NULL, 10); + if (errno != 0) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: error while parsing value (%s) specified for test verbosity\n", __func__, + argv); + return FAIL; + } + + if (verb_level < 0) + verb_level = VERBO_DEF; + else if (verb_level > VERBO_HI) + verb_level = VERBO_HI; + + SetTestVerbosity((int)verb_level); + } + + return SUCCEED; } /* @@ -543,7 +657,7 @@ ParseTestVerbosity(char *argv) H5_ATTR_PURE int GetTestNumErrs(void) { - return (num_errs); + return TestNumErrs_g; } /* @@ -552,16 +666,7 @@ GetTestNumErrs(void) void IncTestNumErrs(void) { - num_errs++; -} - -/* - * Retrieve the current Test Parameters pointer. - */ -H5_ATTR_PURE const void * -GetTestParameters(void) -{ - return (Test_parameters); + TestNumErrs_g++; } /* @@ -575,11 +680,11 @@ TestErrPrintf(const char *format, ...) int ret_value; /* Increment the error count */ - num_errs++; + IncTestNumErrs(); /* Print the requested information */ va_start(arglist, format); - ret_value = vprintf(format, arglist); + ret_value = vfprintf(stderr, format, arglist); va_end(arglist); /* Return the length of the string produced (like printf() does) */ @@ -587,51 +692,84 @@ TestErrPrintf(const char *format, ...) } /* - * Set (control) which test will be tested. 
- * SKIPTEST: skip this test - * ONLYTEST: do only this test - * BEGINTEST: skip all tests before this test - * + * Change testing behavior in relation to a specific test */ -void +herr_t SetTest(const char *testname, int action) { - unsigned Loop; + static bool skipped_all = false; switch (action) { case SKIPTEST: - for (Loop = 0; Loop < Index; Loop++) - if (strcmp(testname, Test[Loop].Name) == 0) { - Test[Loop].SkipFlag = 1; + for (unsigned Loop = 0; Loop < TestCount; Loop++) + if (strcmp(testname, TestArray[Loop].Name) == 0) { + TestArray[Loop].TestSkipFlag = 1; break; } break; case BEGINTEST: - for (Loop = 0; Loop < Index; Loop++) { - if (strcmp(testname, Test[Loop].Name) != 0) - Test[Loop].SkipFlag = 1; + for (unsigned Loop = 0; Loop < TestCount; Loop++) { + if (strcmp(testname, TestArray[Loop].Name) != 0) + TestArray[Loop].TestSkipFlag = 1; else { /* Found it. Set it to run. Done. */ - Test[Loop].SkipFlag = 0; + TestArray[Loop].TestSkipFlag = 0; break; } } break; case ONLYTEST: - for (Loop = 0; Loop < Index; Loop++) { - if (strcmp(testname, Test[Loop].Name) == 0) { + /* Skip all tests, then keep track that we did that. + * Some testing prefers the convenience of being + * able to specify multiple tests to "only" run + * rather than specifying (possibly many more) tests + * to exclude, but we only want to skip all the + * tests a single time to facilitate this. + */ + if (!skipped_all) { + for (unsigned Loop = 0; Loop < TestCount; Loop++) + TestArray[Loop].TestSkipFlag = 1; + skipped_all = true; + } + + for (unsigned Loop = 0; Loop < TestCount; Loop++) { + if (strcmp(testname, TestArray[Loop].Name) == 0) { /* Found it. Set it to run. Break to skip the rest. */ - Test[Loop].SkipFlag = 0; + TestArray[Loop].TestSkipFlag = 0; break; } } break; default: /* error */ - if (mpi_rank_framework_g == 0) - printf("*** ERROR: Unknown action (%d) for SetTest\n", action); - break; + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: invalid action %d specified\n", __func__, action); + return FAIL; } + + return SUCCEED; +} + +/* + * Returns the value set for the maximum number of threads that a test + * program can spawn in addition to the main thread. + */ +H5_ATTR_PURE int +GetTestMaxNumThreads(void) +{ + return TestMaxNumThreads_g; +} + +/* + * Set the value for the maximum number of threads that a test program + * can spawn in addition to the main thread. + */ +herr_t +SetTestMaxNumThreads(int max_num_threads) +{ + TestMaxNumThreads_g = max_num_threads; + + return SUCCEED; } /* Enable a test timer that will kill long-running tests, the time is configurable @@ -640,7 +778,7 @@ SetTest(const char *testname, int action) * Only useful on POSIX systems where alarm(2) is present. This does not include * MinGW builds, which will often incorrectly decide that alarm(2) exists. 
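The reworked SetTest() above now reports failure for an unknown action and keeps its skip-everything-first bookkeeping internal: the first ONLYTEST request flips every registered test to skipped, and each subsequent request re-enables just the named test. Because it walks the registered test list, it only has an effect after the relevant AddTest() calls. A sketch that restricts a run to two hypothetical test names, mirroring "-o array -o attr" on the command line:

#include "testframe.h"

static herr_t
run_only_array_and_attr(void)
{
    /* The first call skips all registered tests and re-enables "array";
     * the second call re-enables "attr" as well. */
    if (SetTest("array", ONLYTEST) < 0)
        return FAIL;
    if (SetTest("attr", ONLYTEST) < 0)
        return FAIL;

    return SUCCEED;
}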
*/ -void +herr_t TestAlarmOn(void) { #ifdef H5_HAVE_ALARM @@ -648,12 +786,28 @@ TestAlarmOn(void) unsigned long alarm_sec = H5_ALARM_SEC; /* Number of seconds before alarm goes off */ /* Get the alarm value from the environment variable, if set */ - if (env_val != NULL) - alarm_sec = (unsigned)strtoul(env_val, (char **)NULL, 10); + if (env_val != NULL) { + errno = 0; + alarm_sec = strtoul(env_val, NULL, 10); + if (errno != 0) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: error while parsing value (%s) specified for alarm timeout\n", __func__, + env_val); + return FAIL; + } + else if (alarm_sec > (unsigned long)UINT_MAX) { + if (TestFrameworkProcessID_g == 0) + fprintf(stderr, "%s: value (%lu) specified for alarm timeout too large\n", __func__, + alarm_sec); + return FAIL; + } + } /* Set the number of seconds before alarm goes off */ alarm((unsigned)alarm_sec); #endif + + return SUCCEED; } /* Disable the test timer */ diff --git a/test/testframe.h b/test/testframe.h new file mode 100644 index 00000000000..21ffcfe673f --- /dev/null +++ b/test/testframe.h @@ -0,0 +1,803 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: Header file for a basic HDF5 testing framework + */ + +#ifndef H5TESTFRAME_H +#define H5TESTFRAME_H + +/* + * Include generic testing header, which includes the public + * HDF5 header, first. Including a private header first can + * cause the library to #undef the H5OPEN macro and cause odd + * test failures due to global IDs not being initialized. + */ +#include "h5test.h" + +#include "H5private.h" + +/**********/ +/* Macros */ +/**********/ + +/** + * \def MAXTESTNAME + * The maximum length for the name given to a test, including the NUL terminator + */ +#define MAXTESTNAME 64 + +/** + * \def MAXTESTDESC + * The maximum length for the description given to a test, including the NUL terminator + */ +#define MAXTESTDESC 128 + +/** + * \def H5_ALARM_SEC + * Number of seconds to wait before killing a test (requires alarm(2)) + */ +#define H5_ALARM_SEC 1200 /* default is 20 minutes */ + +/* + * Test controls definitions. + */ +#define SKIPTEST 1 /* Skip this test */ +#define ONLYTEST 2 /* Do only this test */ +#define BEGINTEST 3 /* Skip all tests before this test */ + +/* + * Predefined test verbosity levels. + * + * Convention: + * + * The higher the verbosity value, the more information printed. + * So, output for higher verbosity also include output of all lower + * verbosity. + */ +//! +/* + * + * Value Description + * 0 None: No informational message. + * 1 "All tests passed" + * 2 Header of overall test + * 3 Default: header and results of individual test + * 4 + * 5 Low: Major category of tests. + * 6 + * 7 Medium: Minor category of tests such as functions called. + * 8 + * 9 High: Highest level. All information. + */ +//! 
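For reference, a small sketch of how a test typically uses the verbosity levels and the MESSAGE() macro defined immediately below; the test name and messages are made up. MESSAGE() prints only from process 0 and only when the current verbosity is strictly greater than the level given as its first argument, while the VERBOSE_* queries are at-least checks:

#include "testframe.h"

static void
test_verbosity_demo(const void H5_ATTR_UNUSED *params)
{
    /* Printed only when the verbosity level is above VERBO_DEF (3) */
    MESSAGE(VERBO_DEF, ("Testing the widget path\n"));

    /* Guard expensive diagnostics behind an explicit level check */
    if (VERBOSE_HI)
        printf("full widget state dump ...\n");
}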
+#define VERBO_NONE 0 /* None */ +#define VERBO_DEF 3 /* Default */ +#define VERBO_LO 5 /* Low */ +#define VERBO_MED 7 /* Medium */ +#define VERBO_HI 9 /* High */ + +/* + * Verbose queries + * Only None needs an exact match. The rest are at least as much. + */ +#define VERBOSE_NONE (TestVerbosity_g == VERBO_NONE) +#define VERBOSE_DEF (TestVerbosity_g >= VERBO_DEF) +#define VERBOSE_LO (TestVerbosity_g >= VERBO_LO) +#define VERBOSE_MED (TestVerbosity_g >= VERBO_MED) +#define VERBOSE_HI (TestVerbosity_g >= VERBO_HI) + +/* Used to document process through a test */ +#define MESSAGE(V, A) \ + do { \ + if (TestFrameworkProcessID_g == 0 && TestVerbosity_g > (V)) \ + printf A; \ + } while (0) + +/************/ +/* Typedefs */ +/************/ + +/*************/ +/* Variables */ +/*************/ + +H5TEST_DLLVAR int TestFrameworkProcessID_g; +H5TEST_DLLVAR int TestVerbosity_g; + +/**************/ +/* Prototypes */ +/**************/ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Initializes the testing framework + * + * \param[in] ProgName The chosen name for the test executable to + * be used + * \param[in] TestPrivateUsage Pointer to a function which prints out + * additional usage help text that is specific + * to the test program + * \param[in] TestPrivateParser Pointer to a function which parses + * command-line arguments which are specific to + * the test program + * \param[in] TestSetupFunc Pointer to a function which will be called + * as part of TestInit() + * \param[in] TestCleanupFunc Pointer to a function which will be called + * when the testing framework is being shut + * down + * \param[in] TestProcessID ID for the process calling TestInit(). Used + * to control printing of output in parallel + * test programs. + * + * \return \herr_t + * + * \details TestInit() initializes the testing framework by setting up all + * the internal state needed for running tests. TestInit() should be + * called before any other function from this testing framework is + * called, but after other optional library setup functions, such + * as H5open() or H5dont_atexit(). + * + * \p ProgName is used to give a different name to the test program + * than the actual name of the executable. `argv[0]` should be + * passed for \p ProgName if a different name is not desired. + * + * \p TestPrivateUsage is a pointer to a function that can be used + * to print out additional usage help text that is specific to the + * test program when necessary. The TestUsage() function calls this + * function to print out the additional help text after printing out + * a more general set of help test instructions. \p TestPrivateUsage + * may be NULL. + * + * \p TestPrivateParser is a pointer to a function that can be used + * to parse command-line arguments which are specific to the test + * program. The TestParseCmdLine() function defers to this function + * when it encounters a command-line argument that is not among the + * standard list of arguments it recognizes. \p TestPrivateParser + * may be NULL. + * + * \p TestSetupFunc is a pointer to a function that can be used to + * setup any state needed before tests begin executing. If provided, + * this callback function will be called as part of TestInit() once + * the testing framework has been fully initialized. \p TestSetupFunc + * may be NULL. 
+ * + * \p TestCleanupFunc is a pointer to a function that can be used + * to clean up any state after tests have finished executing. If + * provided, this callback function will be called by TestShutdown() + * before the testing framework starts being shut down. + * \p TestCleanupFunc may be NULL. + * + * \p TestProcessID is an integer value that is used to distinguish + * between processes when multiple are involved in running a test + * program. This is primarily useful for controlling testing + * framework output printed during execution of a parallel test + * program. For serial tests, the value 0 should always be passed. + * For parallel tests, the rank value of the MPI process, as obtained + * by calling MPI_Comm_rank(), should be passed. Test framework output + * is only printed from the process with ID 0. + * + * \see TestShutdown(), TestUsage(), TestParseCmdLine() + * + */ +H5TEST_DLL herr_t TestInit(const char *ProgName, void (*TestPrivateUsage)(FILE *stream), + int (*TestPrivateParser)(int argc, char *argv[]), herr_t (*TestSetupFunc)(void), + herr_t (*TestCleanupFunc)(void), int TestProcessID); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Shuts down the testing framework + * + * \return \herr_t + * + * \details TestShutdown() shuts down the testing framework by tearing down + * the internal state needed for running tests and freeing any + * associated memory. TestShutdown() should be called after any + * other function from this testing framework is called, and just + * before any optional library shutdown functions, such as H5close(). + * + * \see TestInit() + * + */ +H5TEST_DLL herr_t TestShutdown(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Prints out test program usage help text + * + * \param[in] stream Pointer to output stream to direct output to + * + * \return void + * + * \details TestUsage() prints out the test program's usage help text to + * the given output stream specified in \p stream. This includes the + * general list of command-line arguments accepted by the test + * program, additional test program-specific usage help text printed + * out by the optional callback specified in TestInit() and a list + * of all the tests and their descriptions, as added by AddTest(). + * \p stream may be NULL, in which case stdout is used. + * + * Note: when a parallel test calls TestUsage(), the output, + * including additional output from the optional callback specified + * in TestInit(), is only printed from the MPI process with rank + * value 0. Any collective operations should currently be avoided in + * the optional callback if one is provided. + * + * \see AddTest(), TestInit() + * + */ +H5TEST_DLL void TestUsage(FILE *stream); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Prints out miscellaneous test program information + * + * \param[in] stream Pointer to output stream to direct output to + * + * \return void + * + * \details TestInfo() prints out miscellaneous information for the test + * program, such as the version of the HDF5 library that the program + * is linked against. \p stream may be NULL, in which case stdout is + * used. + * + * Note: when a parallel test calls TestInfo(), the output is + * only printed from the MPI process with rank value 0. 
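Taken together, the framework entry points documented in this header (TestInit(), AddTest(), TestParseCmdLine(), PerformTests(), TestSummary(), TestShutdown()) compose into a small driver. The following is a sketch of a minimal serial test program, not the shipped testhdf5 main(); the registered test and its description are placeholders and error handling is abbreviated:

#include "testframe.h"

static void
test_noop(const void H5_ATTR_UNUSED *params)
{
    MESSAGE(5, ("placeholder test body\n"));
}

int
main(int argc, char *argv[])
{
    /* Optional library setup happens before the framework comes up */
    if (H5open() < 0)
        return EXIT_FAILURE;

    /* Serial program: process ID 0, no private usage/parser or setup/cleanup callbacks */
    if (TestInit(argv[0], NULL, NULL, NULL, NULL, 0) < 0)
        return EXIT_FAILURE;

    TestInfo(stdout);

    if (AddTest("noop", test_noop, NULL, NULL, NULL, 0, "placeholder test") < 0)
        return EXIT_FAILURE;

    /* Handles the standard arguments (-v, -x, -o, -b, -t, -s, -c, -h, ...) */
    if (TestParseCmdLine(argc, argv) < 0)
        return EXIT_FAILURE;

    if (PerformTests() < 0)
        return EXIT_FAILURE;

    if (GetTestSummary())
        TestSummary(stdout);

    if (TestShutdown() < 0)
        return EXIT_FAILURE;

    if (H5close() < 0)
        return EXIT_FAILURE;

    return GetTestNumErrs() > 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}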
+ * + */ +H5TEST_DLL void TestInfo(FILE *stream); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Adds a test to the list of tests be executed + * + * \param[in] TestName The chosen name for the test to be executed + * \param[in] TestFunc The function to call when executing the test + * \param[in] TestSetupFunc The function to call before executing the + * test + * \param[in] TestCleanupFunc The function to call after executing the test + * \param[in] TestData A pointer to additional data that will be + * passed to the test function and its setup and + * cleanup callbacks when the test runs + * \param[in] TestDataSize Size of the additional test data pointed to + * by \p TestData + * \param[in] TestDescr A short description of the test + * + * \return \herr_t + * + * \details AddTest() adds a new test to the list of tests that will be + * executed when PerformTests() is called by a test program. + * + * \p TestName is a short name given to a test that can be used to + * control how a test is executed, including skipping that test if + * necessary. The name specified in \p TestName must be #MAXTESTNAME + * bytes or less, including the NUL terminator. The name specified + * in \p TestName must also not be an empty string. If \p TestName + * begins with the character '-', the test will be set to be + * skipped by default. + * + * \p TestFunc is a pointer to the function that will be called for + * the test. The function must return no value and accept a single + * const void * as an argument, which will point to any parameters + * to be passed to the test that are specified in \p TestData. + * + * \p TestSetupFunc is an optional pointer to a function that will + * be called before the main test function is called. This allows + * tests to perform any pre-test setup necessary. The function must + * return no value and accept a single void * as an argument, which + * will point to any parameters to be passed to the test that are + * specified in \p TestData. + * + * \p TestCleanupFunc is an optional pointer to a function that + * will be called after a test's main test function has finished + * executing. This allows tests to perform any post-test cleanup + * necessary. The function must return no value and accept a single + * void * as an argument, which will point to any parameters to be + * passed to the test that are specified in \p TestData. + * + * \p TestData is an optional pointer to test parameters that will + * be passed to the test's main test function when executed, as well + * as the test's optional setup and cleanup callbacks. If given, the + * testing framework will make a copy of the parameters according to + * the size specified in \p TestDataSize. If \p TestData is not NULL, + * \p TestDataSize must be a positive value. Otherwise, if + * \p TestData is NULL, \p TestDataSize must be 0. + * + * \p TestDataSize is the size of the test parameter data to be + * passed to the test's main function and setup and callback + * functions during execution. If \p TestData is not NULL, + * \p TestDataSize must be a positive value. Otherwise, if + * \p TestData is NULL, \p TestDataSize must be 0. + * + * \p TestDescr is an informational description given to a test + * which may be printed out by the testing framework in various + * places. The string passed in \p TestDescr must be #MAXTESTDESC + * bytes or less, including the NUL terminator. 
The string passed + * in \p TestDescr may be an empty string, but it is advised that + * test authors give a description to a test. + * + * \see PerformTests() + * + */ +H5TEST_DLL herr_t AddTest(const char *TestName, void (*TestFunc)(const void *), void (*TestSetupFunc)(void *), + void (*TestCleanupFunc)(void *), const void *TestData, size_t TestDataSize, + const char *TestDescr); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Parses command-line arguments given to the test program + * + * \param[in] argc Command-line argument count; received from main() + * \param[in] argv Command-line argument array; received from main() + * + * \return \herr_t + * + * \details TestParseCmdLine() parses the command-line arguments given to the + * test program. If an optional argument parsing callback was + * specified in the call to TestInit(), TestParseCmdLine() will + * defer to that function for parsing command-line arguments that + * it doesn't recognize. Note: TestParseCmdLine() requires + * that all standard command-line arguments must appear before any + * non-standard arguments that would be parsed by an optional + * argument parsing callback function specified in TestInit(). + * + * Note: TestParseCmdLine() should not be called until all + * tests have been added by AddTest() since some of the command-line + * arguments that are parsed involve the ability to skip certain + * tests. + * + * \see TestInit() + * + */ +H5TEST_DLL herr_t TestParseCmdLine(int argc, char *argv[]); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Executes all tests added by AddTest() that aren't flagged to be + * skipped + * + * \return \herr_t + * + * \details PerformTests() runs all tests that aren't flagged to be skipped + * in the order added by calls to AddTest(). For each test, the + * test's setup callback function (if supplied) will be called + * first, followed by the test's primary function and then the + * test's cleanup callback function (if supplied). Before each test + * begins, a timer is enabled by a call to TestAlarmOn() to prevent + * the test from running longer than desired. A call to + * TestAlarmOff() disables this timer after each test has finished. + * + * \see AddTest(), TestAlarmOn() + * + */ +H5TEST_DLL herr_t PerformTests(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Prints out a summary of the results of running tests + * + * \param[in] stream Pointer to output stream to direct output to + * + * \return void + * + * \details TestSummary() prints out a summary of testing results, including + * each test's name, description and the number of errors that + * occurred during the test's execution. If a test was skipped, the + * number of errors for that test will show as "N/A". \p stream may + * be NULL, in which case stdout is used. + * + * Note: when a parallel test calls TestSummary(), the output + * is only printed from the MPI process with rank value 0. + * + */ +H5TEST_DLL void TestSummary(FILE *stream); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Returns the current test verbosity level setting + * + * \return The current test verbosity level setting + * + * \details GetTestVerbosity() returns the current setting for the level of + * test verbosity. 
These levels are as follows: + * + * \snippet this test_verbo_snip + * + * \see SetTestVerbosity() + * + */ +H5TEST_DLL int GetTestVerbosity(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Sets the current test verbosity level setting + * + * \return The previous test verbosity level setting + * + * \details SetTestVerbosity() sets a new value for the level of test + * verbosity and returns the previous value. These levels are as + * follows: + * + * \snippet this test_verbo_snip + * + * If \p newval is negative, the test verbosity level is set to the + * lowest value (VERBO_NONE). If \p newval is greater than the + * highest verbosity value, it is set to the highest verbosity value + * (VERBO_HI). + * + * \see GetTestVerbosity() + * + */ +H5TEST_DLL int SetTestVerbosity(int newval); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Parses a string for a test verbosity level setting, then sets the + * test verbosity level to that setting + * + * \return \herr_t + * + * \details ParseTestVerbosity() parses a string for a test verbosity level + * setting, then sets the test verbosity level to that setting. The + * string may be the character 'l' (for low verbosity), 'm' (for + * medium verbosity), 'h' (for high verbosity) or a number between + * 0-9, corresponding to the different predefined levels of test + * verbosity. If a negative number is specified, the test verbosity + * level is set to the default (VERBO_DEF). If a number greater + * than VERBO_HI is specified, the test verbosity level is set to + * VERBO_HI. If ParseTestVerbosity() can't parse the string, a + * negative value will be returned to indicate failure. + * + * \see GetTestVerbosity(), SetTestVerbosity() + * + */ +H5TEST_DLL herr_t ParseTestVerbosity(char *argv); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Returns the current TestExpress setting for expedited testing + * + * \return The current TestExpress setting + * + * \details GetTestExpress() returns the current setting for the TestExpress + * variable which controls whether or not some testing should be + * expedited. The variable may be set to one of the following + * values: + * + * 0: Exhaustive run + * Tests should take as long as necessary + * 1: Full run. Default value if H5_TEST_EXPRESS_LEVEL_DEFAULT + * and the HDF5TestExpress environment variable are not defined + * Tests should take no more than 30 minutes + * 2: Quick run + * Tests should take no more than 10 minutes + * 3: Smoke test. + * Default if the HDF5TestExpress environment variable is set to + * a value other than 0-3 + * Tests should take less than 1 minute + * + * The macro H5_TEST_EXPRESS_LEVEL_DEFAULT may be defined to one + * of these values at library configuration time in order to + * override the default value set for TestExpress. The TestExpress + * value may also be overridden at run time by setting the + * HDF5TestExpress environment variable to one of these values. + * + * The limitation imposed by the TestExpress functionality applies + * to the total runtime of a test executable, even if it contains + * multiple sub-tests. + * + * The standard system for test times is a Linux machine running in + * NFS space (to catch tests that involve a great deal of disk I/O). 
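+ *
+ * As a sketch of how a test might honor this setting, longer code
+ * paths can be scaled back at the more expedited levels. The
+ * iteration counts and the do_one_iteration() helper below are
+ * hypothetical:
+ *
+ * \code
+ * size_t niters = (GetTestExpress() > 1) ? 100 : 100000;
+ * size_t u;
+ *
+ * for (u = 0; u < niters; u++)
+ *     do_one_iteration();
+ * \endcode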
+ * + * \see SetTestExpress() + * + */ +H5TEST_DLL int GetTestExpress(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Sets the current TestExpress setting for expedited testing + * + * \return void + * + * \details SetTestExpress() sets a new value for the TestExpress variable + * which controls whether or not some testing should be expedited. + * + * If \p newval is negative, the TestExpress value is set to the + * default value (1). If \p newval is greater than the highest + * TestExpress value, it is set to the highest TestExpress value + * (3). + * + * \see GetTestExpress() + * + */ +H5TEST_DLL void SetTestExpress(int newval); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Returns the current test summary setting + * + * \return The current test summary setting + * + * \details GetTestSummary() returns whether or not a test program should + * call TestSummary() to print out a summary of test results after + * tests have run. This summary includes each test's name, + * description and the number of errors that occurred during the + * test's execution. + * + * \see TestSummary() + * + */ +H5TEST_DLL bool GetTestSummary(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Returns the current test file cleanup status setting + * + * \return The current test file cleanup status setting + * + * \details GetTestCleanup() returns whether or not a test should clean up + * any temporary files it has created when it is finished running. + * If true is returned, the test should clean up temporary files. + * Otherwise, it should leave them in place. + * + * \see SetTestNoCleanup() + * + */ +H5TEST_DLL bool GetTestCleanup(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Sets the test file cleanup status setting to "don't clean up + * temporary files" + * + * \return void + * + * \details SetTestNoCleanup() sets the temporary test file cleanup status + * to false, causing future calls to GetTestCleanup() to return + * false and inform tests that they should not clean up temporary + * test files they have created. + * + * \see GetTestCleanup() + * + */ +H5TEST_DLL void SetTestNoCleanup(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Returns the number of errors recorded for the test program + * + * \return The recorded number of errors + * + * \details GetTestNumErrs() returns the total number of errors recorded + * during the execution of the test program. This number is + * primarily used to determine whether the test program should exit + * with a success or failure value. + * + * \see IncTestNumErrs() + * + */ +H5TEST_DLL int GetTestNumErrs(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Increments the number of errors recorded for the test program + * + * \return void + * + * \details IncTestNumErrs() increments the number of errors recorded + * for the test program. 
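+ *
+ * Note that TestErrPrintf() also increments this count, so tests
+ * that report failures through TestErrPrintf() do not need to call
+ * IncTestNumErrs() directly. As a sketch, a test program's main()
+ * might use the accumulated count to choose its exit status:
+ *
+ * \code
+ * exit(GetTestNumErrs() > 0 ? EXIT_FAILURE : EXIT_SUCCESS);
+ * \endcode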
+ * + * \see GetTestNumErrs() + * + */ +H5TEST_DLL void IncTestNumErrs(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Prints out error messages to stderr and increments the number of + * test program errors + * + * \return return value of vfprintf() + * + * \details TestErrPrintf() is a wrapper around vfprintf() that can be used + * to print out messages to stderr when a test failure occurs. + * TestErrPrintf() increments the number of errors recorded for the + * test program when called. + * + */ +H5TEST_DLL int TestErrPrintf(const char *format, ...) H5_ATTR_FORMAT(printf, 1, 2); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Change test execution for a particular test + * + * \return \herr_t + * + * \details SetTest() is used to change how test execution occurs in relation + * to a particular test. \p testname is the name of the test, as + * specified by AddTest(), to change the behavior for. \p action + * should be one of the following macros: + * + * SKIPTEST - informs the testing framework to skip the test + * specified by \p testname + * ONLYTEST - informs the testing framework to only run the test + * specified by \p testname and skip all other tests + * BEGINTEST - informs the testing framework to start running tests + * at the test specified by \p testname and skip all + * tests before it (in the order added by calls to + * AddTest()) + * + * Other values for \p action will cause SetTest() to return + * a negative value for failure. + * + * Multiple tests can be set to the value ONLYTEST in order to run a + * subset of tests. This is intended as a convenient alternative to + * needing to skip many other tests by setting them to the value + * SKIPTEST. + * + * \see AddTest() + * + */ +H5TEST_DLL herr_t SetTest(const char *testname, int action); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Returns the maximum number of threads a test program is allowed to + * spawn in addition to the main thread + * + * \return The maximum number of allowed spawned threads + * + * \details GetTestMaxNumThreads() returns the value for the maximum number + * of threads a test program is allowed to spawn in addition to the + * main thread for the test program. This number is usually + * configured by a command-line argument passed to the test program + * and is intended for allowing tests to adjust their workload + * according to the resources of the testing environment. + * + * The default value is -1, which means that multi-threaded tests + * aren't limited in the number of threads they can spawn, but + * should still only use a reasonable amount of threads. The value + * 0 indicates that no additional threads should be spawned, which + * is primarily for testing purposes. The value returned by + * GetTestMaxNumThreads() is meaningless for non-multi-threaded + * tests. 
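+ *
+ * As a sketch, a multi-threaded test might size its thread pool as
+ * follows, treating 0 as "use only the main thread" (the fallback
+ * count of 16 threads is hypothetical):
+ *
+ * \code
+ * int max_threads = GetTestMaxNumThreads();
+ * int num_threads = (max_threads < 0) ? 16 : max_threads;
+ * \endcode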
+ * + * \see SetTestMaxNumThreads() + * + */ +H5TEST_DLL int GetTestMaxNumThreads(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Sets the maximum number of threads a test program is allowed to + * spawn in addition to the main thread + * + * \return \herr_t + * + * \details SetTestMaxNumThreads() sets the value for the maximum number of + * threads a test program is allowed to spawn in addition to the + * main thread for the test program. This number is usually + * configured by a command-line argument passed to the test program + * and is intended for allowing tests to adjust their workload + * according to the resources of the testing environment. + * + * If \p max_num_threads is a negative value, test programs will be + * allowed to spawn any number of threads, though it is advised + * that test programs try to limit this to a reasonable number. + * The value 0 indicates that no additional threads should be + * spawned, which is primarily for testing purposes. + * + * \see SetTestMaxNumThreads() + * + */ +H5TEST_DLL herr_t SetTestMaxNumThreads(int max_num_threads); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Enables a global test timer + * + * \return \herr_t + * + * \details TestAlarmOn() enables a global test timer through use of + * alarm(2). This timer is intended to stop long-running or hanging + * tests after a configurable amount of time. The default time + * allowed for a test program is 1200 seconds (20 minutes). The + * environment variable HDF5_ALARM_SECONDS may be set to a number of + * seconds in order to override this value. However, a test program + * may still be limited by the build system used to build the + * library. For example, HDF5's CMake code has a default limit of + * 1200 seconds for a test program. + * + * If support for alarm(2) is not available on the system, this + * function has no effect. + * + * \see TestAlarmOff() + * + */ +H5TEST_DLL herr_t TestAlarmOn(void); + +/** + * -------------------------------------------------------------------------- + * \ingroup H5TEST + * + * \brief Disables a global test timer + * + * \return void + * + * \details TestAlarmOff() disables a global test timer as enabled by + * TestAlarmOn(). + * + * If support for alarm(2) is not available on the system, this + * function has no effect. + * + * \see TestAlarmOn() + * + */ +H5TEST_DLL void TestAlarmOff(void); + +#ifdef __cplusplus +} +#endif + +#endif /* H5TESTFRAME_H */ diff --git a/test/testhdf5.c b/test/testhdf5.c index 660fee9e2cd..925b15981e0 100644 --- a/test/testhdf5.c +++ b/test/testhdf5.c @@ -18,11 +18,11 @@ General test wrapper for HDF5 base library test programs DESIGN - Each test function should be implemented as function having no - parameters and returning void (i.e. no return value). They should be put - into the list of AddTest() calls in main() below. Functions which depend - on other functionality should be placed below the AddTest() call for the - base functionality testing. + Each test function should be implemented as function having a single + const void * parameter and returning void (i.e. no return value). They + should be put into the list of AddTest() calls in main() below. Functions + which depend on other functionality should be placed below the AddTest() + call for the base functionality testing. 
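+
+   For example, a minimal test module would export a function such as
+   (the name "test_example" is hypothetical)
+
+       void test_example(const void *params);
+
+   and register it in main() below with a call like
+
+       AddTest("example", test_example, NULL, NULL, NULL, 0, "Example test");
+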
Each test module should include testhdf5.h and define a unique set of names for test files they create. @@ -47,53 +47,64 @@ main(int argc, char *argv[]) H5Pclose(fapl_id); /* Initialize testing framework */ - TestInit(argv[0], NULL, NULL); + if (TestInit(argv[0], NULL, NULL, NULL, NULL, 0) < 0) { + fprintf(stderr, "couldn't initialize testing framework\n"); + exit(EXIT_FAILURE); + } /* Tests are generally arranged from least to most complexity... */ - AddTest("config", test_configure, cleanup_configure, "Configure definitions", NULL); - AddTest("h5system", test_h5_system, cleanup_h5_system, "H5system routines", NULL); - AddTest("metadata", test_metadata, cleanup_metadata, "Encoding/decoding metadata", NULL); - AddTest("checksum", test_checksum, cleanup_checksum, "Checksum algorithm", NULL); - AddTest("skiplist", test_skiplist, NULL, "Skip Lists", NULL); - AddTest("refstr", test_refstr, NULL, "Reference Counted Strings", NULL); - AddTest("file", test_file, cleanup_file, "Low-Level File I/O", NULL); - AddTest("objects", test_h5o, cleanup_h5o, "Generic Object Functions", NULL); - AddTest("h5s", test_h5s, cleanup_h5s, "Dataspaces", NULL); - AddTest("coords", test_coords, cleanup_coords, "Dataspace coordinates", NULL); - AddTest("sohm", test_sohm, cleanup_sohm, "Shared Object Header Messages", NULL); - AddTest("attr", test_attr, cleanup_attr, "Attributes", NULL); - AddTest("select", test_select, cleanup_select, "Selections", NULL); - AddTest("time", test_time, cleanup_time, "Time Datatypes", NULL); - AddTest("ref_deprec", test_reference_deprec, cleanup_reference_deprec, "Deprecated References", NULL); - AddTest("ref", test_reference, cleanup_reference, "References", NULL); - AddTest("vltypes", test_vltypes, cleanup_vltypes, "Variable-Length Datatypes", NULL); - AddTest("vlstrings", test_vlstrings, cleanup_vlstrings, "Variable-Length Strings", NULL); - AddTest("iterate", test_iterate, cleanup_iterate, "Group & Attribute Iteration", NULL); - AddTest("array", test_array, cleanup_array, "Array Datatypes", NULL); - AddTest("genprop", test_genprop, cleanup_genprop, "Generic Properties", NULL); - AddTest("unicode", test_unicode, cleanup_unicode, "UTF-8 Encoding", NULL); - AddTest("id", test_ids, NULL, "User-Created Identifiers", NULL); - AddTest("misc", test_misc, cleanup_misc, "Miscellaneous", NULL); + AddTest("config", test_configure, NULL, cleanup_configure, NULL, 0, "Configure definitions"); + AddTest("h5system", test_h5_system, NULL, cleanup_h5_system, NULL, 0, "H5system routines"); + AddTest("metadata", test_metadata, NULL, cleanup_metadata, NULL, 0, "Encoding/decoding metadata"); + AddTest("checksum", test_checksum, NULL, cleanup_checksum, NULL, 0, "Checksum algorithm"); + AddTest("skiplist", test_skiplist, NULL, NULL, NULL, 0, "Skip Lists"); + AddTest("refstr", test_refstr, NULL, NULL, NULL, 0, "Reference Counted Strings"); + AddTest("file", test_file, NULL, cleanup_file, NULL, 0, "Low-Level File I/O"); + AddTest("objects", test_h5o, NULL, cleanup_h5o, NULL, 0, "Generic Object Functions"); + AddTest("h5s", test_h5s, NULL, cleanup_h5s, NULL, 0, "Dataspaces"); + AddTest("coords", test_coords, NULL, cleanup_coords, NULL, 0, "Dataspace coordinates"); + AddTest("sohm", test_sohm, NULL, cleanup_sohm, NULL, 0, "Shared Object Header Messages"); + AddTest("attr", test_attr, NULL, cleanup_attr, NULL, 0, "Attributes"); + AddTest("select", test_select, NULL, cleanup_select, NULL, 0, "Selections"); + AddTest("time", test_time, NULL, cleanup_time, NULL, 0, "Time Datatypes"); + AddTest("ref_deprec", 
test_reference_deprec, NULL, cleanup_reference_deprec, NULL, 0, + "Deprecated References"); + AddTest("ref", test_reference, NULL, cleanup_reference, NULL, 0, "References"); + AddTest("vltypes", test_vltypes, NULL, cleanup_vltypes, NULL, 0, "Variable-Length Datatypes"); + AddTest("vlstrings", test_vlstrings, NULL, cleanup_vlstrings, NULL, 0, "Variable-Length Strings"); + AddTest("iterate", test_iterate, NULL, cleanup_iterate, NULL, 0, "Group & Attribute Iteration"); + AddTest("array", test_array, NULL, cleanup_array, NULL, 0, "Array Datatypes"); + AddTest("genprop", test_genprop, NULL, cleanup_genprop, NULL, 0, "Generic Properties"); + AddTest("unicode", test_unicode, NULL, cleanup_unicode, NULL, 0, "UTF-8 Encoding"); + AddTest("id", test_ids, NULL, NULL, NULL, 0, "User-Created Identifiers"); + AddTest("misc", test_misc, NULL, cleanup_misc, NULL, 0, "Miscellaneous"); /* Display testing information */ - TestInfo(argv[0]); + TestInfo(stdout); /* Parse command line arguments */ - TestParseCmdLine(argc, argv); + if (TestParseCmdLine(argc, argv) < 0) { + fprintf(stderr, "couldn't parse command-line arguments\n"); + TestShutdown(); + exit(EXIT_FAILURE); + } /* Perform requested testing */ - PerformTests(); + if (PerformTests() < 0) { + fprintf(stderr, "couldn't run tests\n"); + TestShutdown(); + exit(EXIT_FAILURE); + } /* Display test summary, if requested */ if (GetTestSummary()) - TestSummary(); - - /* Clean up test files, if allowed */ - if (GetTestCleanup() && !getenv(HDF5_NOCLEANUP)) - TestCleanup(); + TestSummary(stdout); /* Release test infrastructure */ - TestShutdown(); + if (TestShutdown() < 0) { + fprintf(stderr, "couldn't shut down testing framework\n"); + exit(EXIT_FAILURE); + } /* Exit failure if errors encountered; else exit success. */ /* No need to print anything since PerformTests() already does. */ diff --git a/test/testhdf5.h b/test/testhdf5.h index 75c94ee9b6b..f4f3136d8a4 100644 --- a/test/testhdf5.h +++ b/test/testhdf5.h @@ -17,17 +17,20 @@ #ifndef TESTHDF5_H #define TESTHDF5_H -/* Include generic testing header also */ +/* Include generic testing header */ #include "h5test.h" +/* Include testing framework functionality */ +#include "testframe.h" + /* Use %ld to print the value because long should cover most cases. 
*/ /* Used to make certain a return value _is_not_ a value */ #define CHECK(ret, val, where) \ do { \ if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d " \ - "in %s returned %ld \n", \ - where, (int)__LINE__, __FILE__, (long)(ret)); \ + printf(" Call to routine: %15s at line %4d " \ + "in %s returned %ld \n", \ + where, (int)__LINE__, __FILE__, (long)(ret)); \ } \ if ((ret) == (val)) { \ TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld at line %4d " \ @@ -40,8 +43,8 @@ #define CHECK_I(ret, where) \ do { \ if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s returned %ld\n", (where), (int)__LINE__, \ - __FILE__, (long)(ret)); \ + printf(" Call to routine: %15s at line %4d in %s returned %ld\n", (where), (int)__LINE__, \ + __FILE__, (long)(ret)); \ } \ if ((ret) < 0) { \ TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld line %4d in %s\n", (where), (long)(ret), \ @@ -54,8 +57,8 @@ #define CHECK_PTR(ret, where) \ do { \ if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ - __FILE__, ((const void *)ret)); \ + printf(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ + __FILE__, ((const void *)ret)); \ } \ if (!(ret)) { \ TestErrPrintf("*** UNEXPECTED RETURN from %s is NULL line %4d in %s\n", (where), (int)__LINE__, \ @@ -68,8 +71,8 @@ #define CHECK_PTR_NULL(ret, where) \ do { \ if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ - __FILE__, ((const void *)ret)); \ + printf(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ + __FILE__, ((const void *)ret)); \ } \ if (ret) { \ TestErrPrintf("*** UNEXPECTED RETURN from %s is not NULL line %4d in %s\n", (where), \ @@ -82,8 +85,8 @@ #define CHECK_PTR_EQ(ret, val, where) \ do { \ if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ - __FILE__, (const void *)(ret)); \ + printf(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ + __FILE__, (const void *)(ret)); \ } \ if (ret != val) { \ TestErrPrintf( \ @@ -98,9 +101,9 @@ do { \ long __x = (long)_x, __val = (long)_val; \ if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s had value " \ - "%ld \n", \ - (where), (int)__LINE__, __FILE__, __x); \ + printf(" Call to routine: %15s at line %4d in %s had value " \ + "%ld \n", \ + (where), (int)__LINE__, __FILE__, __x); \ } \ if ((__x) != (__val)) { \ TestErrPrintf("*** UNEXPECTED VALUE from %s should be %ld, but is %ld at line %4d " \ @@ -115,8 +118,8 @@ do { \ _type __x = (_type)_x, __val = (_type)_val; \ if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s had value " _format " \n", (where), \ - (int)__LINE__, __FILE__, __x); \ + printf(" Call to routine: %15s at line %4d in %s had value " _format " \n", (where), \ + (int)__LINE__, __FILE__, __x); \ } \ if ((__x) != (__val)) { \ TestErrPrintf("*** UNEXPECTED VALUE from %s should be " _format ", but is " _format \ @@ -131,9 +134,9 @@ #define VERIFY_STR(x, val, where) \ do { \ if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s had value " \ - "%s \n", \ - (where), (int)__LINE__, __FILE__, x); \ + printf(" Call to routine: %15s at line %4d in %s had value " \ + "%s \n", \ + (where), (int)__LINE__, __FILE__, x); \ } \ if (strcmp(x, val) != 0) { \ TestErrPrintf("*** UNEXPECTED VALUE from %s should be %s, but is 
%s at line %4d " \ @@ -147,9 +150,9 @@ #define RESULT(ret, func) \ do { \ if (VERBOSE_MED) { \ - print_func(" Call to routine: %15s at line %4d in %s returned " \ - "%ld\n", \ - func, (int)__LINE__, __FILE__, (long)(ret)); \ + printf(" Call to routine: %15s at line %4d in %s returned " \ + "%ld\n", \ + func, (int)__LINE__, __FILE__, (long)(ret)); \ } \ if (VERBOSE_HI) \ H5Eprint2(H5E_DEFAULT, stdout); \ @@ -161,94 +164,70 @@ } \ } while (0) -/* Used to document process through a test */ -#if defined(H5_HAVE_PARALLEL) && defined(H5_PARALLEL_TEST) -#define MESSAGE(V, A) \ - do { \ - int mpi_rank; \ - \ - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); \ - if (mpi_rank == 0 && HDGetTestVerbosity() > (V)) \ - print_func A; \ - } while (0) -#else /* H5_HAVE_PARALLEL */ -#define MESSAGE(V, A) \ - do { \ - if (HDGetTestVerbosity() > (V)) \ - print_func A; \ - } while (0) -#endif /* H5_HAVE_PARALLEL */ - /* Used to indicate an error that is complex to check for */ #define ERROR(where) \ do { \ if (VERBOSE_HI) \ - print_func(" Call to routine: %15s at line %4d in %s returned " \ - "invalid result\n", \ - where, (int)__LINE__, __FILE__); \ + printf(" Call to routine: %15s at line %4d in %s returned " \ + "invalid result\n", \ + where, (int)__LINE__, __FILE__); \ TestErrPrintf("*** UNEXPECTED RESULT from %s at line %4d in %s\n", where, (int)__LINE__, __FILE__); \ } while (0) -/* definitions for command strings */ -#define VERBOSITY_STR "Verbosity" -#define SKIP_STR "Skip" -#define TEST_STR "Test" -#define CLEAN_STR "Cleanup" - #ifdef __cplusplus extern "C" { #endif /* Prototypes for the test routines */ -void test_metadata(void); -void test_checksum(void); -void test_refstr(void); -void test_file(void); -void test_h5o(void); -void test_h5t(void); -void test_h5s(void); -void test_coords(void); -void test_h5d(void); -void test_attr(void); -void test_select(void); -void test_time(void); -void test_reference(void); -void test_reference_deprec(void); -void test_vltypes(void); -void test_vlstrings(void); -void test_iterate(void); -void test_array(void); -void test_genprop(void); -void test_configure(void); -void test_h5_system(void); -void test_misc(void); -void test_ids(void); -void test_skiplist(void); -void test_sohm(void); -void test_unicode(void); +void test_metadata(const void *params); +void test_checksum(const void *params); +void test_refstr(const void *params); +void test_file(const void *params); +void test_h5o(const void *params); +void test_h5t(const void *params); +void test_h5s(const void *params); +void test_coords(const void *params); +void test_h5d(const void *params); +void test_attr(const void *params); +void test_select(const void *params); +void test_time(const void *params); +void test_reference(const void *params); +void test_reference_deprec(const void *params); +void test_vltypes(const void *params); +void test_vlstrings(const void *params); +void test_iterate(const void *params); +void test_array(const void *params); +void test_genprop(const void *params); +void test_configure(const void *params); +void test_h5_system(const void *params); +void test_misc(const void *params); +void test_ids(const void *params); +void test_skiplist(const void *params); +void test_sohm(const void *params); +void test_unicode(const void *params); /* Prototypes for the cleanup routines */ -void cleanup_metadata(void); -void cleanup_checksum(void); -void cleanup_file(void); -void cleanup_h5o(void); -void cleanup_h5s(void); -void cleanup_coords(void); -void cleanup_attr(void); -void cleanup_select(void); 
-void cleanup_time(void); -void cleanup_reference(void); -void cleanup_reference_deprec(void); -void cleanup_vltypes(void); -void cleanup_vlstrings(void); -void cleanup_iterate(void); -void cleanup_array(void); -void cleanup_genprop(void); -void cleanup_configure(void); -void cleanup_h5_system(void); -void cleanup_sohm(void); -void cleanup_misc(void); -void cleanup_unicode(void); +void cleanup_metadata(void *params); +void cleanup_checksum(void *params); +void cleanup_file(void *params); +void cleanup_h5o(void *params); +void cleanup_h5s(void *params); +void cleanup_coords(void *params); +void cleanup_attr(void *params); +void cleanup_select(void *params); +void cleanup_time(void *params); +void cleanup_reference(void *params); +void cleanup_reference_deprec(void *params); +void cleanup_vltypes(void *params); +void cleanup_vlstrings(void *params); +void cleanup_iterate(void *params); +void cleanup_array(void *params); +void cleanup_genprop(void *params); +void cleanup_configure(void *params); +void cleanup_h5_system(void *params); +void cleanup_sohm(void *params); +void cleanup_misc(void *params); +void cleanup_unicode(void *params); #ifdef __cplusplus } diff --git a/test/tfile.c b/test/tfile.c index 02f996f66a3..0915ef78ba3 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -4885,7 +4885,11 @@ test_sects_freespace(const char *driver_name, bool new_format) CHECK(nall, FAIL, "H5Fget_free_sections"); /* Should return failure when nsects is 0 with a nonnull sect_info */ - nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, all_sect_info); + H5E_BEGIN_TRY + { + nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, all_sect_info); + } + H5E_END_TRY VERIFY(nsects, FAIL, "H5Fget_free_sections"); /* Retrieve and verify free space info for all the sections */ @@ -5108,7 +5112,11 @@ test_filespace_compatible(void) CHECK(fid, FAIL, "H5Fopen"); /* The dataset should not be there */ - did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT); + H5E_BEGIN_TRY + { + did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT); + } + H5E_END_TRY VERIFY(did, FAIL, "H5Dopen"); /* There should not be any free space in the file */ @@ -6195,6 +6203,7 @@ test_libver_bounds_super_create(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t n case H5F_LIBVER_V112: case H5F_LIBVER_V114: case H5F_LIBVER_V116: + case H5F_LIBVER_V118: ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_3); VERIFY(ok, true, "HDF5_superblock_ver_bounds"); break; @@ -8389,7 +8398,7 @@ test_deprec(const char *driver_name) ** ****************************************************************/ void -test_file(void) +test_file(const void H5_ATTR_UNUSED *params) { const char *driver_name; /* File Driver value from environment */ hid_t fapl_id = H5I_INVALID_HID; /* VFD-dependent fapl ID */ @@ -8489,20 +8498,22 @@ test_file(void) *------------------------------------------------------------------------- */ void -cleanup_file(void) +cleanup_file(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(SFILE1, H5P_DEFAULT); - H5Fdelete(FILE1, H5P_DEFAULT); - H5Fdelete(FILE2, H5P_DEFAULT); - H5Fdelete(FILE3, H5P_DEFAULT); - H5Fdelete(FILE4, H5P_DEFAULT); - H5Fdelete(FILE5, H5P_DEFAULT); - H5Fdelete(FILE6, H5P_DEFAULT); - H5Fdelete(FILE7, H5P_DEFAULT); - H5Fdelete(FILE8, H5P_DEFAULT); - H5Fdelete(DST_FILE, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(SFILE1, H5P_DEFAULT); + H5Fdelete(FILE1, H5P_DEFAULT); + H5Fdelete(FILE2, H5P_DEFAULT); + H5Fdelete(FILE3, H5P_DEFAULT); + H5Fdelete(FILE4, H5P_DEFAULT); + H5Fdelete(FILE5, 
H5P_DEFAULT); + H5Fdelete(FILE6, H5P_DEFAULT); + H5Fdelete(FILE7, H5P_DEFAULT); + H5Fdelete(FILE8, H5P_DEFAULT); + H5Fdelete(DST_FILE, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/tgenprop.c b/test/tgenprop.c index 4f4f60e79ab..b4267b66a87 100644 --- a/test/tgenprop.c +++ b/test/tgenprop.c @@ -2145,7 +2145,7 @@ test_genprop_deprec_list(void) ** ****************************************************************/ void -test_genprop(void) +test_genprop(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing Generic Properties\n")); @@ -2188,11 +2188,13 @@ test_genprop(void) *------------------------------------------------------------------------- */ void -cleanup_genprop(void) +cleanup_genprop(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/th5_system.c b/test/th5_system.c index eb6787f9fb8..f55642b8941 100644 --- a/test/th5_system.c +++ b/test/th5_system.c @@ -551,7 +551,7 @@ test_h5_strndup(void) } void -test_h5_system(void) +test_h5_system(const void H5_ATTR_UNUSED *params) { MESSAGE(5, ("Testing H5system routines\n")); @@ -563,7 +563,7 @@ test_h5_system(void) } void -cleanup_h5_system(void) +cleanup_h5_system(void H5_ATTR_UNUSED *params) { /* Nothing to cleanup yet */ } diff --git a/test/th5o.c b/test/th5o.c index 815b5648e4d..f4eddfc4f5c 100644 --- a/test/th5o.c +++ b/test/th5o.c @@ -1887,7 +1887,7 @@ test_h5o_getinfo_visit(void) ** ****************************************************************/ void -test_h5o(void) +test_h5o(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing Objects\n")); @@ -1920,14 +1920,16 @@ test_h5o(void) *------------------------------------------------------------------------- */ void -cleanup_h5o(void) +cleanup_h5o(void H5_ATTR_UNUSED *params) { - char filename[1024]; - - H5E_BEGIN_TRY - { - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - H5Fdelete(filename, H5P_DEFAULT); + if (GetTestCleanup()) { + char filename[1024]; + + H5E_BEGIN_TRY + { + h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); + H5Fdelete(filename, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/th5s.c b/test/th5s.c index 4c46c400116..e3505f2d0fa 100644 --- a/test/th5s.c +++ b/test/th5s.c @@ -212,7 +212,11 @@ test_h5s_basic(void) fid1 = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); CHECK_I(fid1, "H5Fopen"); if (fid1 >= 0) { - dset1 = H5Dopen2(fid1, "dset", H5P_DEFAULT); + H5E_BEGIN_TRY + { + dset1 = H5Dopen2(fid1, "dset", H5P_DEFAULT); + } + H5E_END_TRY; VERIFY(dset1, FAIL, "H5Dopen2"); ret = H5Fclose(fid1); CHECK_I(ret, "H5Fclose"); @@ -3476,7 +3480,7 @@ test_versionbounds(void) ** ****************************************************************/ void -test_h5s(void) +test_h5s(const void H5_ATTR_UNUSED *params) { H5F_libver_t low, high; /* Low and high bounds */ @@ -3534,15 +3538,17 @@ test_h5s(void) *------------------------------------------------------------------------- */ void -cleanup_h5s(void) +cleanup_h5s(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(DATAFILE, H5P_DEFAULT); - H5Fdelete(NULLFILE, H5P_DEFAULT); - H5Fdelete(BASICFILE, H5P_DEFAULT); - H5Fdelete(ZEROFILE, H5P_DEFAULT); - H5Fdelete(VERBFNAME, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(DATAFILE, H5P_DEFAULT); + 
H5Fdelete(NULLFILE, H5P_DEFAULT); + H5Fdelete(BASICFILE, H5P_DEFAULT); + H5Fdelete(ZEROFILE, H5P_DEFAULT); + H5Fdelete(VERBFNAME, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/tid.c b/test/tid.c index 8dac7f19e41..7d03872bfee 100644 --- a/test/tid.c +++ b/test/tid.c @@ -1495,7 +1495,7 @@ test_appropriate_ids(void) } void -test_ids(void) +test_ids(const void H5_ATTR_UNUSED *params) { /* Set the random # seed */ srand((unsigned)time(NULL)); diff --git a/test/titerate.c b/test/titerate.c index 3c0b82e68f2..6128508d1b1 100644 --- a/test/titerate.c +++ b/test/titerate.c @@ -322,7 +322,12 @@ test_iter_group(hid_t fapl, bool new_format) i = 0; idx = 0; memset(info.name, 0, NAMELEN); - while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) > 0) { + H5E_BEGIN_TRY + { + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); + } + H5E_END_TRY + while (ret > 0) { /* Verify return value from iterator gets propagated correctly */ VERIFY(ret, 2, "H5Literate2"); @@ -341,7 +346,13 @@ test_iter_group(hid_t fapl, bool new_format) TestErrPrintf( "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n", (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]); - } /* end while */ + + H5E_BEGIN_TRY + { + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); + } + H5E_END_TRY + } VERIFY(ret, -1, "H5Literate2"); if (i != (NDATASETS + 2)) @@ -354,7 +365,12 @@ test_iter_group(hid_t fapl, bool new_format) i = 0; idx = 0; memset(info.name, 0, NAMELEN); - while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) >= 0) { + H5E_BEGIN_TRY + { + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); + } + H5E_END_TRY + while (ret >= 0) { /* Verify return value from iterator gets propagated correctly */ VERIFY(ret, 1, "H5Literate2"); @@ -373,6 +389,12 @@ test_iter_group(hid_t fapl, bool new_format) TestErrPrintf( "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n", (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]); + + H5E_BEGIN_TRY + { + ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); + } + H5E_END_TRY } /* end while */ VERIFY(ret, -1, "H5Literate2"); @@ -1221,7 +1243,7 @@ test_links_deprec(hid_t fapl) ** ****************************************************************/ void -test_iterate(void) +test_iterate(const void H5_ATTR_UNUSED *params) { hid_t fapl, fapl2; /* File access property lists */ unsigned new_format; /* Whether to use the new format or not */ @@ -1274,11 +1296,13 @@ test_iterate(void) *------------------------------------------------------------------------- */ void -cleanup_iterate(void) +cleanup_iterate(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(DATAFILE, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(DATAFILE, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/tmeta.c b/test/tmeta.c index 68f77c83dae..2e3978a2c38 100644 --- a/test/tmeta.c +++ b/test/tmeta.c @@ -28,18 +28,18 @@ static uint8_t compar_buffer[] = { /* Little-endian encoded version of the 16-bit signed integer */ - (uint8_t)((TEST_INT16_VALUE)&0xff), + (uint8_t)((TEST_INT16_VALUE) & 0xff), (uint8_t)((TEST_INT16_VALUE >> 8) & 0xff), /* Little-endian encoded version of the 16-bit unsigned integer */ - (uint8_t)((TEST_UINT16_VALUE)&0xff), + (uint8_t)((TEST_UINT16_VALUE) & 0xff), (uint8_t)((TEST_UINT16_VALUE >> 8) & 0xff), /* Little-endian encoded version of the 
32-bit signed integer */ - (uint8_t)((TEST_INT32_VALUE)&0xff), + (uint8_t)((TEST_INT32_VALUE) & 0xff), (uint8_t)((TEST_INT32_VALUE >> 8) & 0xff), (uint8_t)((TEST_INT32_VALUE >> 16) & 0xff), (uint8_t)((TEST_INT32_VALUE >> 24) & 0xff), /* Little-endian encoded version of the 32-bit unsigned integer */ - (uint8_t)((TEST_UINT32_VALUE)&0xff), + (uint8_t)((TEST_UINT32_VALUE) & 0xff), (uint8_t)((TEST_UINT32_VALUE >> 8) & 0xff), (uint8_t)((TEST_UINT32_VALUE >> 16) & 0xff), (uint8_t)((TEST_UINT32_VALUE >> 24) & 0xff), @@ -53,7 +53,7 @@ static uint8_t encode_buffer[sizeof(compar_buffer)]; ** ****************************************************************/ void -test_metadata(void) +test_metadata(const void H5_ATTR_UNUSED *params) { int16_t ei16 = TEST_INT16_VALUE; /* variables to hold the values to encode */ uint16_t eu16 = TEST_UINT16_VALUE; @@ -121,7 +121,7 @@ test_metadata(void) *------------------------------------------------------------------------- */ void -cleanup_metadata(void) +cleanup_metadata(void H5_ATTR_UNUSED *params) { /* no file to clean */ } diff --git a/test/tmisc.c b/test/tmisc.c index 63bf5d8edcb..8c73aa61949 100644 --- a/test/tmisc.c +++ b/test/tmisc.c @@ -1196,11 +1196,19 @@ test_misc7(void) CHECK(tid, FAIL, "H5Tcreate"); /* Attempt to commit an empty compound datatype */ - ret = H5Tcommit2(fid, MISC7_TYPENAME1, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Tcommit2(fid, MISC7_TYPENAME1, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Tcommit2"); /* Attempt to use empty compound datatype to create dataset */ - did = H5Dcreate2(fid, MISC7_DSETNAME1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + did = H5Dcreate2(fid, MISC7_DSETNAME1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Dcreate2"); /* Add a field to the compound datatype */ @@ -1228,11 +1236,19 @@ test_misc7(void) CHECK(tid, FAIL, "H5Tenum_create"); /* Attempt to commit an empty enum datatype */ - ret = H5Tcommit2(fid, MISC7_TYPENAME2, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + ret = H5Tcommit2(fid, MISC7_TYPENAME2, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Tcommit2"); /* Attempt to use empty enum datatype to create dataset */ - did = H5Dcreate2(fid, MISC7_DSETNAME2, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + did = H5Dcreate2(fid, MISC7_DSETNAME2, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY VERIFY(did, FAIL, "H5Dcreate2"); /* Add a member to the enum datatype */ @@ -3821,7 +3837,11 @@ test_misc20(void) CHECK(dcpl, FAIL, "H5Pcreate"); /* Try to use chunked storage for this dataset */ - ret = H5Pset_chunk(dcpl, rank, big_dims); + H5E_BEGIN_TRY + { + ret = H5Pset_chunk(dcpl, rank, big_dims); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Pset_chunk"); /* Verify that the storage for the dataset is the correct size and hasn't @@ -6569,7 +6589,7 @@ test_misc39(void) * the object should have a reference count of 1 since the file * was just created. */ - VERIFY(file_vol_obj->rc, 1, "checking reference count"); + VERIFY(H5VL_OBJ_RC(file_vol_obj), 1, "checking reference count"); /* Create a variable-length string type */ str_type = H5Tcopy(H5T_C_S1); @@ -6630,7 +6650,7 @@ test_misc39(void) * associate each attribute's datatype with the file's VOL object * and will have incremented the reference count by 5. 
*/ - VERIFY(file_vol_obj->rc, 6, "checking reference count"); + VERIFY(H5VL_OBJ_RC(file_vol_obj), 6, "checking reference count"); /* Increments file's VOL object reference count by 1 */ ret = H5Awrite(attr_id1, str_type, buf); @@ -6662,7 +6682,7 @@ test_misc39(void) * incrementing the reference count of the associated file's VOL * object. */ - VERIFY(file_vol_obj->rc, 12, "checking reference count"); + VERIFY(H5VL_OBJ_RC(file_vol_obj), 12, "checking reference count"); ret = H5Aclose(attr_id1); CHECK(ret, FAIL, "H5Aclose"); @@ -6696,7 +6716,7 @@ test_misc39(void) * the object should have a reference count of 1 since the file * was just opened. */ - VERIFY(file_vol_obj->rc, 1, "checking reference count"); + VERIFY(H5VL_OBJ_RC(file_vol_obj), 1, "checking reference count"); /* Increments file's VOL object reference count by 1 */ attr_id1 = H5Aopen(file_id, "varstr_attribute", H5P_DEFAULT); @@ -6717,7 +6737,7 @@ test_misc39(void) * the attributes will also have associated their datatypes with * the file's VOL object. */ - VERIFY(file_vol_obj->rc, 6, "checking reference count"); + VERIFY(H5VL_OBJ_RC(file_vol_obj), 6, "checking reference count"); /* Increments file's VOL object reference count by 1 */ ret = H5Aread(attr_id1, str_type, rbuf); @@ -6749,7 +6769,7 @@ test_misc39(void) * incrementing the reference count of the associated file's VOL * object. */ - VERIFY(file_vol_obj->rc, 12, "checking reference count"); + VERIFY(H5VL_OBJ_RC(file_vol_obj), 12, "checking reference count"); ret = H5Treclaim(str_type, space_id, H5P_DEFAULT, rbuf); ret = H5Treclaim(array_type, space_id, H5P_DEFAULT, arr_rbuf); @@ -7105,7 +7125,7 @@ test_misc41(void) ** ****************************************************************/ void -test_misc(void) +test_misc(const void H5_ATTR_UNUSED *params) { bool default_driver = h5_using_default_driver(NULL); @@ -7188,52 +7208,54 @@ test_misc(void) *------------------------------------------------------------------------- */ void -cleanup_misc(void) +cleanup_misc(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(MISC1_FILE, H5P_DEFAULT); - H5Fdelete(MISC2_FILE_1, H5P_DEFAULT); - H5Fdelete(MISC2_FILE_2, H5P_DEFAULT); - H5Fdelete(MISC3_FILE, H5P_DEFAULT); - H5Fdelete(MISC4_FILE_1, H5P_DEFAULT); - H5Fdelete(MISC4_FILE_2, H5P_DEFAULT); - H5Fdelete(MISC5_FILE, H5P_DEFAULT); - H5Fdelete(MISC6_FILE, H5P_DEFAULT); - H5Fdelete(MISC7_FILE, H5P_DEFAULT); - H5Fdelete(MISC8_FILE, H5P_DEFAULT); - H5Fdelete(MISC9_FILE, H5P_DEFAULT); - H5Fdelete(MISC10_FILE_NEW, H5P_DEFAULT); - H5Fdelete(MISC11_FILE, H5P_DEFAULT); - H5Fdelete(MISC12_FILE, H5P_DEFAULT); - H5Fdelete(MISC13_FILE_1, H5P_DEFAULT); - H5Fdelete(MISC13_FILE_2, H5P_DEFAULT); - H5Fdelete(MISC14_FILE, H5P_DEFAULT); - H5Fdelete(MISC15_FILE, H5P_DEFAULT); - H5Fdelete(MISC16_FILE, H5P_DEFAULT); - H5Fdelete(MISC17_FILE, H5P_DEFAULT); - H5Fdelete(MISC18_FILE, H5P_DEFAULT); - H5Fdelete(MISC19_FILE, H5P_DEFAULT); - H5Fdelete(MISC20_FILE, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(MISC1_FILE, H5P_DEFAULT); + H5Fdelete(MISC2_FILE_1, H5P_DEFAULT); + H5Fdelete(MISC2_FILE_2, H5P_DEFAULT); + H5Fdelete(MISC3_FILE, H5P_DEFAULT); + H5Fdelete(MISC4_FILE_1, H5P_DEFAULT); + H5Fdelete(MISC4_FILE_2, H5P_DEFAULT); + H5Fdelete(MISC5_FILE, H5P_DEFAULT); + H5Fdelete(MISC6_FILE, H5P_DEFAULT); + H5Fdelete(MISC7_FILE, H5P_DEFAULT); + H5Fdelete(MISC8_FILE, H5P_DEFAULT); + H5Fdelete(MISC9_FILE, H5P_DEFAULT); + H5Fdelete(MISC10_FILE_NEW, H5P_DEFAULT); + H5Fdelete(MISC11_FILE, H5P_DEFAULT); + H5Fdelete(MISC12_FILE, 
H5P_DEFAULT); + H5Fdelete(MISC13_FILE_1, H5P_DEFAULT); + H5Fdelete(MISC13_FILE_2, H5P_DEFAULT); + H5Fdelete(MISC14_FILE, H5P_DEFAULT); + H5Fdelete(MISC15_FILE, H5P_DEFAULT); + H5Fdelete(MISC16_FILE, H5P_DEFAULT); + H5Fdelete(MISC17_FILE, H5P_DEFAULT); + H5Fdelete(MISC18_FILE, H5P_DEFAULT); + H5Fdelete(MISC19_FILE, H5P_DEFAULT); + H5Fdelete(MISC20_FILE, H5P_DEFAULT); #ifdef H5_HAVE_FILTER_SZIP - H5Fdelete(MISC21_FILE, H5P_DEFAULT); - H5Fdelete(MISC22_FILE, H5P_DEFAULT); + H5Fdelete(MISC21_FILE, H5P_DEFAULT); + H5Fdelete(MISC22_FILE, H5P_DEFAULT); #endif /* H5_HAVE_FILTER_SZIP */ - H5Fdelete(MISC23_FILE, H5P_DEFAULT); - H5Fdelete(MISC24_FILE, H5P_DEFAULT); - H5Fdelete(MISC25A_FILE, H5P_DEFAULT); - H5Fdelete(MISC25C_FILE, H5P_DEFAULT); - H5Fdelete(MISC26_FILE, H5P_DEFAULT); - H5Fdelete(MISC28_FILE, H5P_DEFAULT); - H5Fdelete(MISC29_COPY_FILE, H5P_DEFAULT); - H5Fdelete(MISC30_FILE, H5P_DEFAULT); + H5Fdelete(MISC23_FILE, H5P_DEFAULT); + H5Fdelete(MISC24_FILE, H5P_DEFAULT); + H5Fdelete(MISC25A_FILE, H5P_DEFAULT); + H5Fdelete(MISC25C_FILE, H5P_DEFAULT); + H5Fdelete(MISC26_FILE, H5P_DEFAULT); + H5Fdelete(MISC28_FILE, H5P_DEFAULT); + H5Fdelete(MISC29_COPY_FILE, H5P_DEFAULT); + H5Fdelete(MISC30_FILE, H5P_DEFAULT); #ifndef H5_NO_DEPRECATED_SYMBOLS - H5Fdelete(MISC31_FILE, H5P_DEFAULT); + H5Fdelete(MISC31_FILE, H5P_DEFAULT); #endif /* H5_NO_DEPRECATED_SYMBOLS */ - H5Fdelete(MISC38C_FILE, H5P_DEFAULT); - H5Fdelete(MISC39_FILE, H5P_DEFAULT); - H5Fdelete(MISC40_FILE, H5P_DEFAULT); - H5Fdelete(MISC41_FILE, H5P_DEFAULT); + H5Fdelete(MISC38C_FILE, H5P_DEFAULT); + H5Fdelete(MISC39_FILE, H5P_DEFAULT); + H5Fdelete(MISC40_FILE, H5P_DEFAULT); + H5Fdelete(MISC41_FILE, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } /* end cleanup_misc() */ diff --git a/test/trefer.c b/test/trefer.c index b1e4a3854b4..e2a09c0a21e 100644 --- a/test/trefer.c +++ b/test/trefer.c @@ -2254,7 +2254,11 @@ test_reference_obj_deleted(void) CHECK(ret, FAIL, "H5Dread"); /* Open deleted dataset object */ - dset2 = H5Ropen_object(&oref, H5P_DEFAULT, H5P_DEFAULT); + H5E_BEGIN_TRY + { + dset2 = H5Ropen_object(&oref, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object"); /* Close Dataset */ @@ -3830,7 +3834,7 @@ test_reference_perf(void) ** ****************************************************************/ void -test_reference(void) +test_reference(const void H5_ATTR_UNUSED *params) { H5F_libver_t low, high; /* Low and high bounds */ const char *driver_name; /* File Driver value from environment */ @@ -3884,22 +3888,24 @@ test_reference(void) *------------------------------------------------------------------------- */ void -cleanup_reference(void) +cleanup_reference(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(FILE_REF_PARAM, H5P_DEFAULT); - H5Fdelete(FILE_REF_OBJ, H5P_DEFAULT); - H5Fdelete(FILE_REF_VL_OBJ, H5P_DEFAULT); - H5Fdelete(FILE_REF_CMPND_OBJ, H5P_DEFAULT); - H5Fdelete(FILE_REF_REG, H5P_DEFAULT); - H5Fdelete(FILE_REF_REG_1D, H5P_DEFAULT); - H5Fdelete(FILE_REF_OBJ_DEL, H5P_DEFAULT); - H5Fdelete(FILE_REF_GRP, H5P_DEFAULT); - H5Fdelete(FILE_REF_ATTR, H5P_DEFAULT); - H5Fdelete(FILE_REF_EXT1, H5P_DEFAULT); - H5Fdelete(FILE_REF_EXT2, H5P_DEFAULT); - H5Fdelete(FILE_REF_COMPAT, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(FILE_REF_PARAM, H5P_DEFAULT); + H5Fdelete(FILE_REF_OBJ, H5P_DEFAULT); + H5Fdelete(FILE_REF_VL_OBJ, H5P_DEFAULT); + H5Fdelete(FILE_REF_CMPND_OBJ, H5P_DEFAULT); + H5Fdelete(FILE_REF_REG, H5P_DEFAULT); + H5Fdelete(FILE_REF_REG_1D, H5P_DEFAULT); + 
H5Fdelete(FILE_REF_OBJ_DEL, H5P_DEFAULT); + H5Fdelete(FILE_REF_GRP, H5P_DEFAULT); + H5Fdelete(FILE_REF_ATTR, H5P_DEFAULT); + H5Fdelete(FILE_REF_EXT1, H5P_DEFAULT); + H5Fdelete(FILE_REF_EXT2, H5P_DEFAULT); + H5Fdelete(FILE_REF_COMPAT, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/trefer_deprec.c b/test/trefer_deprec.c index bbcf630fbc3..3035cbb2045 100644 --- a/test/trefer_deprec.c +++ b/test/trefer_deprec.c @@ -166,53 +166,134 @@ test_reference_params(void) CHECK(ret, FAIL, "H5Dcreate2"); /* Test parameters to H5Rcreate */ - ret = H5Rcreate(NULL, fid1, "/Group1/Dataset1", H5R_OBJECT, (hid_t)H5I_INVALID_HID); + H5E_BEGIN_TRY + { + ret = H5Rcreate(NULL, fid1, "/Group1/Dataset1", H5R_OBJECT, (hid_t)H5I_INVALID_HID); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Rcreate ref"); - ret = H5Rcreate(&wbuf[0], (hid_t)H5I_INVALID_HID, "/Group1/Dataset1", H5R_OBJECT, (hid_t)H5I_INVALID_HID); + H5E_BEGIN_TRY + { + ret = H5Rcreate(&wbuf[0], (hid_t)H5I_INVALID_HID, "/Group1/Dataset1", H5R_OBJECT, + (hid_t)H5I_INVALID_HID); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Rcreate loc_id"); - ret = H5Rcreate(&wbuf[0], fid1, NULL, H5R_OBJECT, (hid_t)H5I_INVALID_HID); + H5E_BEGIN_TRY + { + ret = H5Rcreate(&wbuf[0], fid1, NULL, H5R_OBJECT, (hid_t)H5I_INVALID_HID); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Rcreate name"); - ret = H5Rcreate(&wbuf[0], fid1, "", H5R_OBJECT, (hid_t)H5I_INVALID_HID); + H5E_BEGIN_TRY + { + ret = H5Rcreate(&wbuf[0], fid1, "", H5R_OBJECT, (hid_t)H5I_INVALID_HID); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Rcreate null name"); - ret = H5Rcreate(&wbuf[0], fid1, "/Group1/Dataset1", H5R_MAXTYPE, (hid_t)H5I_INVALID_HID); + H5E_BEGIN_TRY + { + ret = H5Rcreate(&wbuf[0], fid1, "/Group1/Dataset1", H5R_MAXTYPE, (hid_t)H5I_INVALID_HID); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Rcreate type"); - ret = H5Rcreate(&wbuf[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION, (hid_t)H5I_INVALID_HID); + H5E_BEGIN_TRY + { + ret = H5Rcreate(&wbuf[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION, (hid_t)H5I_INVALID_HID); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Rcreate region space"); - ret = H5Rcreate(&wbuf[0], fid1, "/Group1/Dataset1", H5R_MAXTYPE, (hid_t)0); + H5E_BEGIN_TRY + { + ret = H5Rcreate(&wbuf[0], fid1, "/Group1/Dataset1", H5R_MAXTYPE, (hid_t)0); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Rcreate space"); /* Test parameters to H5Rdereference */ - dset2 = H5Rdereference2((hid_t)H5I_INVALID_HID, H5P_DEFAULT, H5R_OBJECT, &rbuf[0]); + H5E_BEGIN_TRY + { + dset2 = H5Rdereference2((hid_t)H5I_INVALID_HID, H5P_DEFAULT, H5R_OBJECT, &rbuf[0]); + } + H5E_END_TRY VERIFY(dset2, FAIL, "H5Rdereference2 loc_id"); - dset2 = H5Rdereference2(dataset, (hid_t)H5I_INVALID_HID, H5R_OBJECT, &rbuf[0]); + H5E_BEGIN_TRY + { + dset2 = H5Rdereference2(dataset, (hid_t)H5I_INVALID_HID, H5R_OBJECT, &rbuf[0]); + } + H5E_END_TRY VERIFY(dset2, FAIL, "H5Rdereference2 oapl_id"); - dset2 = H5Rdereference2(dataset, dapl_id, H5R_OBJECT, NULL); + H5E_BEGIN_TRY + { + dset2 = H5Rdereference2(dataset, dapl_id, H5R_OBJECT, NULL); + } + H5E_END_TRY VERIFY(dset2, FAIL, "H5Rdereference2 ref"); - dset2 = H5Rdereference2(dataset, dapl_id, H5R_MAXTYPE, &rbuf[0]); + H5E_BEGIN_TRY + { + dset2 = H5Rdereference2(dataset, dapl_id, H5R_MAXTYPE, &rbuf[0]); + } + H5E_END_TRY VERIFY(dset2, FAIL, "H5Rdereference2 type"); /* Test parameters to H5Rget_obj_type2 */ - ret = H5Rget_obj_type2((hid_t)H5I_INVALID_HID, H5R_OBJECT, &rbuf[0], NULL); + H5E_BEGIN_TRY + { + ret = H5Rget_obj_type2((hid_t)H5I_INVALID_HID, H5R_OBJECT, &rbuf[0], NULL); + } + H5E_END_TRY VERIFY(ret, FAIL, 
"H5Rget_obj_type2 loc_id"); - ret = H5Rget_obj_type2(fid1, H5R_OBJECT, NULL, NULL); + H5E_BEGIN_TRY + { + ret = H5Rget_obj_type2(fid1, H5R_OBJECT, NULL, NULL); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Rget_obj_type2 ref"); - ret = H5Rget_obj_type2(fid1, H5R_MAXTYPE, &rbuf[0], NULL); + H5E_BEGIN_TRY + { + ret = H5Rget_obj_type2(fid1, H5R_MAXTYPE, &rbuf[0], NULL); + } + H5E_END_TRY VERIFY(ret, FAIL, "H5Rget_obj_type2 type"); /* Test parameters to H5Rget_name */ - name_size = H5Rget_name((hid_t)H5I_INVALID_HID, H5R_DATASET_REGION, &rbuf[0], NULL, 0); + H5E_BEGIN_TRY + { + name_size = H5Rget_name((hid_t)H5I_INVALID_HID, H5R_DATASET_REGION, &rbuf[0], NULL, 0); + } + H5E_END_TRY VERIFY(name_size, FAIL, "H5Rget_name loc_id"); - name_size = H5Rget_name(fid1, H5R_DATASET_REGION, NULL, NULL, 0); + H5E_BEGIN_TRY + { + name_size = H5Rget_name(fid1, H5R_DATASET_REGION, NULL, NULL, 0); + } + H5E_END_TRY VERIFY(name_size, FAIL, "H5Rget_name ref"); - name_size = H5Rget_name(fid1, H5R_MAXTYPE, &rbuf[0], NULL, 0); + H5E_BEGIN_TRY + { + name_size = H5Rget_name(fid1, H5R_MAXTYPE, &rbuf[0], NULL, 0); + } + H5E_END_TRY VERIFY(name_size, FAIL, "H5Rget_name type"); /* Test parameters to H5Rget_region */ - ret_id = H5Rget_region((hid_t)H5I_INVALID_HID, H5R_OBJECT, &rbuf[0]); + H5E_BEGIN_TRY + { + ret_id = H5Rget_region((hid_t)H5I_INVALID_HID, H5R_OBJECT, &rbuf[0]); + } + H5E_END_TRY VERIFY(ret_id, FAIL, "H5Rget_region loc_id"); - ret_id = H5Rget_region(fid1, H5R_OBJECT, NULL); + H5E_BEGIN_TRY + { + ret_id = H5Rget_region(fid1, H5R_OBJECT, NULL); + } + H5E_END_TRY VERIFY(ret_id, FAIL, "H5Rget_region ref"); - ret_id = H5Rget_region(fid1, H5R_OBJECT, &rbuf[0]); + H5E_BEGIN_TRY + { + ret_id = H5Rget_region(fid1, H5R_OBJECT, &rbuf[0]); + } + H5E_END_TRY VERIFY(ret_id, FAIL, "H5Rget_region type"); /* Close disk dataspace */ @@ -773,7 +854,11 @@ test_reference_region(H5F_libver_t libver_low, H5F_libver_t libver_high) /* Try to read an unaddressed dataset */ memset(&undef_reg, 0, sizeof(undef_reg)); - dset2 = H5Rdereference2(dset1, dapl_id, H5R_DATASET_REGION, undef_reg); + H5E_BEGIN_TRY + { + dset2 = H5Rdereference2(dset1, dapl_id, H5R_DATASET_REGION, undef_reg); + } + H5E_END_TRY VERIFY(dset2, FAIL, "H5Rdereference2 haddr_undef"); /* Try to open objects */ @@ -1314,7 +1399,11 @@ test_reference_obj_deleted(void) CHECK(ret, FAIL, "H5Dopen2"); /* Open undefined reference */ - dset2 = H5Rdereference2(dataset, H5P_DEFAULT, H5R_OBJECT, &addr); + H5E_BEGIN_TRY + { + dset2 = H5Rdereference2(dataset, H5P_DEFAULT, H5R_OBJECT, &addr); + } + H5E_END_TRY VERIFY(dset2, FAIL, "H5Rdereference2"); /* Read selection from disk */ @@ -1323,12 +1412,20 @@ test_reference_obj_deleted(void) CHECK(ret, FAIL, "H5Dread"); /* Open deleted dataset object */ - dset2 = H5Rdereference2(dataset, H5P_DEFAULT, H5R_OBJECT, &oref); + H5E_BEGIN_TRY + { + dset2 = H5Rdereference2(dataset, H5P_DEFAULT, H5R_OBJECT, &oref); + } + H5E_END_TRY VERIFY(dset2, FAIL, "H5Rdereference2"); /* Open nonsense reference */ memset(&oref, 0, sizeof(hobj_ref_t)); - dset2 = H5Rdereference2(dataset, H5P_DEFAULT, H5R_OBJECT, &oref); + H5E_BEGIN_TRY + { + dset2 = H5Rdereference2(dataset, H5P_DEFAULT, H5R_OBJECT, &oref); + } + H5E_END_TRY VERIFY(dset2, FAIL, "H5Rdereference2"); /* Close Dataset */ @@ -1771,10 +1868,18 @@ test_reference_compat(void) CHECK(obj_type, H5G_UNKNOWN, "H5Rget_obj_type1"); VERIFY(obj_type, H5G_DATASET, "H5Rget_obj_type1"); - obj_type = H5Rget_obj_type1(dataset, H5R_DATASET_REGION, &rbuf_reg[2]); + H5E_BEGIN_TRY + { + obj_type = H5Rget_obj_type1(dataset, 
H5R_DATASET_REGION, &rbuf_reg[2]); + } + H5E_END_TRY VERIFY(obj_type, H5G_UNKNOWN, "H5Rget_obj_type1"); - obj_type = H5Rget_obj_type1(dataset, H5R_DATASET_REGION, &rbuf_reg[3]); + H5E_BEGIN_TRY + { + obj_type = H5Rget_obj_type1(dataset, H5R_DATASET_REGION, &rbuf_reg[3]); + } + H5E_END_TRY VERIFY(obj_type, H5G_UNKNOWN, "H5Rget_obj_type1"); /* Make sure the referenced objects can be opened */ @@ -1812,7 +1917,7 @@ test_reference_compat(void) ** ****************************************************************/ void -test_reference_deprec(void) +test_reference_deprec(const void H5_ATTR_UNUSED *params) { H5F_libver_t low, high; /* Low and high bounds */ bool vol_is_native; @@ -1862,9 +1967,11 @@ test_reference_deprec(void) *------------------------------------------------------------------------- */ void -cleanup_reference_deprec(void) +cleanup_reference_deprec(void H5_ATTR_UNUSED *params) { - HDremove(FILE1); - HDremove(FILE2); - HDremove(FILE3); + if (GetTestCleanup()) { + HDremove(FILE1); + HDremove(FILE2); + HDremove(FILE3); + } } diff --git a/test/trefstr.c b/test/trefstr.c index cd7ddcb44e3..3cd49c33e98 100644 --- a/test/trefstr.c +++ b/test/trefstr.c @@ -512,7 +512,7 @@ test_refstr_finalize(void) ** ****************************************************************/ void -test_refstr(void) +test_refstr(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing Reference Counted Strings\n")); diff --git a/test/tselect.c b/test/tselect.c index 5167aba7e06..a2dcf269a8e 100644 --- a/test/tselect.c +++ b/test/tselect.c @@ -16063,7 +16063,7 @@ test_h5s_set_extent_none(void) ** ****************************************************************/ void -test_select(void) +test_select(const void H5_ATTR_UNUSED *params) { hid_t plist_id; /* Property list for reading random hyperslabs */ hid_t fapl; /* Property list accessing the file */ @@ -16275,11 +16275,13 @@ test_select(void) *------------------------------------------------------------------------- */ void -cleanup_select(void) +cleanup_select(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/tskiplist.c b/test/tskiplist.c index 25ee35c0418..53d0d958645 100644 --- a/test/tskiplist.c +++ b/test/tskiplist.c @@ -529,9 +529,9 @@ test_skiplist_string(void) const char *s; } string_node; string_node data[10] = {{10, "10"}, {20, "20"}, {15, "15"}, {5, "05"}, {50, "50"}, - {30, "30"}, {31, "31"}, {32, "32"}, {80, "80"}, {90, "90"}}; + {30, "30"}, {31, "31"}, {32, "32"}, {80, "80"}, {90, "90"}}; string_node hashed_data[10] = {{5, "05"}, {10, "10"}, {15, "15"}, {20, "20"}, {30, "30"}, - {31, "31"}, {32, "32"}, {50, "50"}, {80, "80"}, {90, "90"}}; + {31, "31"}, {32, "32"}, {50, "50"}, {80, "80"}, {90, "90"}}; string_node *found_item; /* Item found in skip list */ herr_t ret; /* Generic return value */ @@ -753,9 +753,9 @@ test_skiplist_obj(void) size_t num; /* Number of elements in skip list */ size_t u; /* Local index variable */ H5_obj_t data[10] = {{10, 12}, {20, 12}, {10, 32}, {10, 11}, {50, 1}, - {8, 12}, {31, 12}, {20, 11}, {31, 11}, {8, 32}}; + {8, 12}, {31, 12}, {20, 11}, {31, 11}, {8, 32}}; H5_obj_t sorted_data[10] = {{8, 12}, {8, 32}, {10, 11}, {10, 12}, {10, 32}, - {20, 11}, {20, 12}, {31, 11}, {31, 12}, {50, 1}}; + {20, 11}, {20, 12}, {31, 11}, {31, 12}, {50, 1}}; H5_obj_t *found_item; /* Item found in skip list */ herr_t ret; /* 
Generic return value */ @@ -834,9 +834,9 @@ test_skiplist_generic(void) size_t num; /* Number of elements in skip list */ size_t u; /* Local index variable */ generic_t data[10] = {{10, 1}, {20, 13}, {15, 32}, {5, 2}, {50, 37}, - {30, 100}, {31, 38}, {32, 34}, {80, 32}, {90, 0}}; + {30, 100}, {31, 38}, {32, 34}, {80, 32}, {90, 0}}; generic_t sorted_data[10] = {{30, 100}, {15, 32}, {31, 38}, {32, 34}, {5, 2}, - {20, 13}, {10, 1}, {50, 37}, {80, 32}, {90, 0}}; + {20, 13}, {10, 1}, {50, 37}, {80, 32}, {90, 0}}; generic_t *found_item; /* Item found in skip list */ herr_t ret; /* Generic return value */ @@ -1555,7 +1555,7 @@ test_skiplist_term(void) ** ****************************************************************/ void -test_skiplist(void) +test_skiplist(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing Skip Lists\n")); diff --git a/test/tsohm.c b/test/tsohm.c index cc639a6a2b2..b054e6d170c 100644 --- a/test/tsohm.c +++ b/test/tsohm.c @@ -71,8 +71,8 @@ typedef struct dtype1_struct { #define DTYPE2_SIZE 1024 static const char *DSETNAME[] = {"dataset0", "dataset1", "dataset2", "dataset3", "dataset4", - "dataset5", "dataset6", "dataset7", "dataset8", "dataset9", - "dataset10", "dataset11", NULL}; + "dataset5", "dataset6", "dataset7", "dataset8", "dataset9", + "dataset10", "dataset11", NULL}; static const char *EXTRA_DSETNAME[] = {"ex_dataset0", "ex_dataset1", "ex_dataset2", "ex_dataset3", "ex_dataset4", "ex_dataset5", "ex_dataset6", "ex_dataset7", "ex_dataset8", @@ -94,7 +94,7 @@ static const char *ENUM_NAME[] = {"enum_member0", "enum_member1", "enum_member "enum_member15", "enum_member16", "enum_member17", "enum_member18", "enum_member19", NULL}; static const int ENUM_VAL[] = {0, 13, -500, 63, 64, -64, 65, 2048, 1, 2, -1, - 7, 130, -5000, 630, 640, -640, 650, 20480, 10, -1001, -10}; + 7, 130, -5000, 630, 640, -640, 650, 20480, 10, -1001, -10}; #define SIZE2_RANK1 6 #define SIZE2_RANK2 10 #define SIZE2_DIMS \ @@ -615,7 +615,7 @@ size1_helper(hid_t file, const char *filename, hid_t fapl_id, bool test_file_clo /* Closing and re-opening the file takes a long time on systems without * local disks. Don't close and reopen if express testing is enabled. */ - if (TestExpress > 1) + if (h5_get_testexpress() > 1) test_file_closing = false; /* Initialize wdata */ @@ -1553,7 +1553,7 @@ size2_helper(hid_t fcpl_id, int test_file_closing, size2_helper_struct *ret_size /* Closing and re-opening the file takes a long time on systems without * local disks. Don't close and reopen if express testing is enabled. 
*/ - if (TestExpress > 1) + if (h5_get_testexpress() > 1) test_file_closing = 0; /* Create a file and get its size */ @@ -3708,7 +3708,7 @@ test_sohm_external_dtype(void) ** ****************************************************************/ void -test_sohm(void) +test_sohm(const void H5_ATTR_UNUSED *params) { const char *driver_name; bool vol_is_native; @@ -3767,9 +3767,11 @@ test_sohm(void) *------------------------------------------------------------------------- */ void -cleanup_sohm(void) +cleanup_sohm(void H5_ATTR_UNUSED *params) { - HDremove(FILENAME); - HDremove(FILENAME_SRC); - HDremove(FILENAME_DST); + if (GetTestCleanup()) { + HDremove(FILENAME); + HDremove(FILENAME_SRC); + HDremove(FILENAME_DST); + } } /* cleanup_sohm */ diff --git a/test/ttime.c b/test/ttime.c index 08e9f967ab7..3d9ab879565 100644 --- a/test/ttime.c +++ b/test/ttime.c @@ -198,7 +198,7 @@ test_time_io(void) ** ****************************************************************/ void -test_time(void) +test_time(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing Time Datatypes\n")); @@ -220,11 +220,13 @@ test_time(void) *------------------------------------------------------------------------- */ void -cleanup_time(void) +cleanup_time(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(DATAFILE, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(DATAFILE, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/ttsafe.c b/test/ttsafe.c index 2f5f26db331..b1cf42f4e0a 100644 --- a/test/ttsafe.c +++ b/test/ttsafe.c @@ -51,7 +51,7 @@ num_digits(int num) /* Test the H5is_library_threadsafe() function */ void -tts_is_threadsafe(void) +tts_is_threadsafe(const void H5_ATTR_UNUSED *params) { bool is_ts; bool should_be; @@ -97,7 +97,10 @@ main(int argc, char *argv[]) { /* Initialize testing framework */ - TestInit(argv[0], NULL, NULL); + if (TestInit(argv[0], NULL, NULL, NULL, NULL, 0) < 0) { + fprintf(stderr, "couldn't initialize testing framework\n"); + return -1; + } #ifdef H5_HAVE_THREADS MESSAGE(2, ("\nConcurrency Configuration:\n")); @@ -120,41 +123,43 @@ main(int argc, char *argv[]) #endif /* Tests are generally arranged from least to most complexity... 
*/ - AddTest("is_threadsafe", tts_is_threadsafe, NULL, "library threadsafe status", NULL); + AddTest("is_threadsafe", tts_is_threadsafe, NULL, NULL, NULL, 0, "library threadsafe status"); #ifdef H5_HAVE_THREADS - AddTest("thread_pool", tts_thread_pool, NULL, "thread pools", NULL); + AddTest("thread_pool", tts_thread_pool, NULL, NULL, NULL, 0, "thread pools"); #ifndef H5_HAVE_STDATOMIC_H /* C11 atomics only tested when emulated */ - AddTest("atomics", tts_atomics, NULL, "emulation of C11 atomics", NULL); + AddTest("atomics", tts_atomics, NULL, NULL, NULL, 0, "emulation of C11 atomics"); #endif /* H5_HAVE_STDATOMIC_H */ - AddTest("rwlock", tts_rwlock, NULL, "simple R/W locks", NULL); + AddTest("rwlock", tts_rwlock, NULL, NULL, NULL, 0, "simple R/W locks"); #ifndef H5_HAVE_WIN_THREADS /* Recursive R/W locks */ - AddTest("rec_rwlock_1", tts_rec_rwlock_smoke_check_1, NULL, "recursive R/W lock smoke check 1 -- basic", - NULL); - AddTest("rec_rwlock_2", tts_rec_rwlock_smoke_check_2, NULL, - "recursive R/W lock smoke check 2 -- mob of readers", NULL); - AddTest("rec_rwlock_3", tts_rec_rwlock_smoke_check_3, NULL, - "recursive R/W lock smoke check 3 -- mob of writers", NULL); - AddTest("rec_rwlock_4", tts_rec_rwlock_smoke_check_4, NULL, - "recursive R/W lock smoke check 4 -- mixed mob", NULL); + AddTest("rec_rwlock_1", tts_rec_rwlock_smoke_check_1, NULL, NULL, NULL, 0, + "recursive R/W lock smoke check 1 -- basic"); + AddTest("rec_rwlock_2", tts_rec_rwlock_smoke_check_2, NULL, NULL, NULL, 0, + "recursive R/W lock smoke check 2 -- mob of readers"); + AddTest("rec_rwlock_3", tts_rec_rwlock_smoke_check_3, NULL, NULL, NULL, 0, + "recursive R/W lock smoke check 3 -- mob of writers"); + AddTest("rec_rwlock_4", tts_rec_rwlock_smoke_check_4, NULL, NULL, NULL, 0, + "recursive R/W lock smoke check 4 -- mixed mob"); #endif /* !H5_HAVE_WIN_THREADS */ - AddTest("semaphore", tts_semaphore, NULL, "lightweight system semaphores", NULL); + AddTest("semaphore", tts_semaphore, NULL, NULL, NULL, 0, "lightweight system semaphores"); #ifdef H5_HAVE_THREADSAFE - AddTest("thread_id", tts_thread_id, NULL, "thread IDs", NULL); + AddTest("thread_id", tts_thread_id, NULL, NULL, NULL, 0, "thread IDs"); - AddTest("dcreate", tts_dcreate, cleanup_dcreate, "multi-dataset creation", NULL); - AddTest("error", tts_error, cleanup_error, "per-thread error stacks", NULL); + /* Error stack test must be done after thread_id test to not mess up expected IDs */ + AddTest("error_stacks", tts_error_stacks, NULL, NULL, NULL, 0, "error stack tests"); + AddTest("dcreate", tts_dcreate, NULL, cleanup_dcreate, NULL, 0, "multi-dataset creation"); + AddTest("error", tts_error, NULL, cleanup_error, NULL, 0, "per-thread error stacks"); #ifdef H5_HAVE_PTHREAD_H /* Thread cancellability only supported with pthreads ... 
*/ - AddTest("cancel", tts_cancel, cleanup_cancel, "thread cancellation safety test", NULL); + AddTest("cancel", tts_cancel, NULL, cleanup_cancel, NULL, 0, "thread cancellation safety test"); #endif /* H5_HAVE_PTHREAD_H */ - AddTest("acreate", tts_acreate, cleanup_acreate, "multi-attribute creation", NULL); - AddTest("attr_vlen", tts_attr_vlen, cleanup_attr_vlen, "multi-file-attribute-vlen read", NULL); + AddTest("acreate", tts_acreate, NULL, cleanup_acreate, NULL, 0, "multi-attribute creation"); + AddTest("attr_vlen", tts_attr_vlen, NULL, cleanup_attr_vlen, NULL, 0, "multi-file-attribute-vlen read"); /* Developer API routine tests */ - AddTest("developer", tts_develop_api, NULL, "developer API routines", NULL); + AddTest("developer", tts_develop_api, NULL, NULL, NULL, 0, "developer API routines"); #else /* H5_HAVE_THREADSAFE */ @@ -169,24 +174,31 @@ main(int argc, char *argv[]) #endif /* H5_HAVE_THREADS */ /* Display testing information */ - TestInfo(argv[0]); + TestInfo(stdout); /* Parse command line arguments */ - TestParseCmdLine(argc, argv); + if (TestParseCmdLine(argc, argv) < 0) { + fprintf(stderr, "couldn't parse command-line arguments\n"); + TestShutdown(); + return -1; + } /* Perform requested testing */ - PerformTests(); + if (PerformTests() < 0) { + fprintf(stderr, "couldn't run tests\n"); + TestShutdown(); + return -1; + } /* Display test summary, if requested */ if (GetTestSummary()) - TestSummary(); - - /* Clean up test files, if allowed */ - if (GetTestCleanup() && !getenv(HDF5_NOCLEANUP)) - TestCleanup(); + TestSummary(stdout); /* Release test infrastructure */ - TestShutdown(); + if (TestShutdown() < 0) { + fprintf(stderr, "couldn't shut down testing framework\n"); + return -1; + } return GetTestNumErrs(); diff --git a/test/ttsafe.h b/test/ttsafe.h index 7544b3ff32d..61d86be0e4e 100644 --- a/test/ttsafe.h +++ b/test/ttsafe.h @@ -34,33 +34,34 @@ extern char *gen_name(int); /* Prototypes for the test routines */ -void tts_is_threadsafe(void); +void tts_is_threadsafe(const void *); #ifdef H5_HAVE_THREADS -void tts_thread_pool(void); -void tts_atomics(void); -void tts_rwlock(void); -void tts_semaphore(void); +void tts_thread_pool(const void *); +void tts_atomics(const void *); +void tts_rwlock(const void *); +void tts_semaphore(const void *); #ifndef H5_HAVE_WIN_THREADS -void tts_rec_rwlock_smoke_check_1(void); -void tts_rec_rwlock_smoke_check_2(void); -void tts_rec_rwlock_smoke_check_3(void); -void tts_rec_rwlock_smoke_check_4(void); +void tts_rec_rwlock_smoke_check_1(const void *); +void tts_rec_rwlock_smoke_check_2(const void *); +void tts_rec_rwlock_smoke_check_3(const void *); +void tts_rec_rwlock_smoke_check_4(const void *); #endif /* !H5_HAVE_WIN_THREADS */ #ifdef H5_HAVE_THREADSAFE -void tts_dcreate(void); -void tts_error(void); -void tts_cancel(void); -void tts_acreate(void); -void tts_attr_vlen(void); -void tts_thread_id(void); -void tts_develop_api(void); +void tts_dcreate(const void *); +void tts_error(const void *); +void tts_cancel(const void *); +void tts_acreate(const void *); +void tts_attr_vlen(const void *); +void tts_thread_id(const void *); +void tts_develop_api(const void *); +void tts_error_stacks(const void *); /* Prototypes for the cleanup routines */ -void cleanup_dcreate(void); -void cleanup_error(void); -void cleanup_cancel(void); -void cleanup_acreate(void); -void cleanup_attr_vlen(void); +void cleanup_dcreate(void *); +void cleanup_error(void *); +void cleanup_cancel(void *); +void cleanup_acreate(void *); +void cleanup_attr_vlen(void *); #endif 
/* H5_HAVE_THREADSAFE */ #endif /* H5_HAVE_THREADS */ diff --git a/test/ttsafe_acreate.c b/test/ttsafe_acreate.c index 6ee12d03987..15ad84b1c88 100644 --- a/test/ttsafe_acreate.c +++ b/test/ttsafe_acreate.c @@ -45,7 +45,7 @@ typedef struct acreate_data_struct { } ttsafe_name_data_t; void -tts_acreate(void) +tts_acreate(const void H5_ATTR_UNUSED *params) { /* Thread declarations */ H5TS_thread_t threads[NUM_THREADS]; @@ -182,9 +182,11 @@ tts_acreate_thread(void *client_data) } /* end tts_acreate_thread() */ void -cleanup_acreate(void) +cleanup_acreate(void H5_ATTR_UNUSED *params) { - HDunlink(FILENAME); + if (GetTestCleanup()) { + HDunlink(FILENAME); + } } #endif /*H5_HAVE_THREADSAFE*/ diff --git a/test/ttsafe_atomic.c b/test/ttsafe_atomic.c index 0150ccecade..732a0583ea8 100644 --- a/test/ttsafe_atomic.c +++ b/test/ttsafe_atomic.c @@ -64,7 +64,7 @@ decr_task(void *_counter) ********************************************************************** */ void -tts_atomics(void) +tts_atomics(const void H5_ATTR_UNUSED *params) { H5TS_pool_t *pool = NULL; herr_t result; diff --git a/test/ttsafe_attr_vlen.c b/test/ttsafe_attr_vlen.c index 10ce4c63d78..dc742368b30 100644 --- a/test/ttsafe_attr_vlen.c +++ b/test/ttsafe_attr_vlen.c @@ -51,7 +51,7 @@ H5TS_THREAD_RETURN_TYPE tts_attr_vlen_thread(void *); void -tts_attr_vlen(void) +tts_attr_vlen(const void H5_ATTR_UNUSED *params) { H5TS_thread_t threads[NUM_THREADS] = {0}; /* Thread declaration */ hid_t fid = H5I_INVALID_HID; /* File ID */ @@ -157,7 +157,7 @@ tts_attr_vlen_thread(void H5_ATTR_UNUSED *client_data) VERIFY_STR(string_attr_check, string_attr, "H5Aread"); /* Free the attribute data */ - ret = H5Dvlen_reclaim(atid, asid, H5P_DEFAULT, &string_attr_check); + ret = H5Treclaim(atid, asid, H5P_DEFAULT, &string_attr_check); CHECK(ret, FAIL, "H5Dvlen_reclaim"); /* Close IDs */ @@ -180,9 +180,11 @@ tts_attr_vlen_thread(void H5_ATTR_UNUSED *client_data) } /* end tts_attr_vlen_thread() */ void -cleanup_attr_vlen(void) +cleanup_attr_vlen(void H5_ATTR_UNUSED *params) { - HDunlink(FILENAME); + if (GetTestCleanup()) { + HDunlink(FILENAME); + } } #endif /*H5_HAVE_THREADSAFE*/ diff --git a/test/ttsafe_cancel.c b/test/ttsafe_cancel.c index 7aaf5aaaa6b..7531f3e0dde 100644 --- a/test/ttsafe_cancel.c +++ b/test/ttsafe_cancel.c @@ -55,7 +55,7 @@ pthread_t childthread; static H5TS_barrier_t barrier; void -tts_cancel(void) +tts_cancel(const void H5_ATTR_UNUSED *params) { hid_t dataset; int buffer; @@ -203,9 +203,11 @@ cancellation_cleanup(void *arg) } /* end cancellation_cleanup() */ void -cleanup_cancel(void) +cleanup_cancel(void H5_ATTR_UNUSED *params) { - HDunlink(FILENAME); + if (GetTestCleanup()) { + HDunlink(FILENAME); + } } #endif /*H5_HAVE_PTHREAD_H*/ diff --git a/test/ttsafe_dcreate.c b/test/ttsafe_dcreate.c index 35ad2efbbf1..e206a1415d1 100644 --- a/test/ttsafe_dcreate.c +++ b/test/ttsafe_dcreate.c @@ -54,7 +54,7 @@ thr_info thread_out[NUM_THREAD]; ********************************************************************** */ void -tts_dcreate(void) +tts_dcreate(const void H5_ATTR_UNUSED *params) { /* thread definitions */ H5TS_thread_t threads[NUM_THREAD]; @@ -152,8 +152,10 @@ tts_dcreate_creator(void *_thread_data) } /* end tts_dcreate_creator() */ void -cleanup_dcreate(void) +cleanup_dcreate(void H5_ATTR_UNUSED *params) { - HDunlink(FILENAME); + if (GetTestCleanup()) { + HDunlink(FILENAME); + } } #endif /*H5_HAVE_THREADSAFE*/ diff --git a/test/ttsafe_develop.c b/test/ttsafe_develop.c index 1ecf1757618..8622d326f4c 100644 --- a/test/ttsafe_develop.c +++ 
b/test/ttsafe_develop.c @@ -97,7 +97,7 @@ tts_develop_api_thr_2(void *_udata) ********************************************************************** */ void -tts_develop_api(void) +tts_develop_api(const void H5_ATTR_UNUSED *params) { H5TS_thread_t thread_1, thread_2; H5TS_barrier_t barrier; diff --git a/test/ttsafe_error.c b/test/ttsafe_error.c index 95062eaa8d1..efa3b7c81b9 100644 --- a/test/ttsafe_error.c +++ b/test/ttsafe_error.c @@ -26,6 +26,9 @@ * ********************************************************************/ #include "ttsafe.h" +#define H5VL_FRIEND /* Suppress error about including H5VLpkg */ +#define H5VL_TESTING +#include "H5VLpkg.h" /* Virtual Object Layer */ #ifdef H5_HAVE_THREADSAFE @@ -56,13 +59,14 @@ static herr_t walk_error_callback(unsigned, const H5E_error2_t static H5TS_THREAD_RETURN_TYPE tts_error_thread(void *); void -tts_error(void) +tts_error(const void H5_ATTR_UNUSED *params) { hid_t def_fapl = H5I_INVALID_HID; hid_t vol_id = H5I_INVALID_HID; hid_t dataset = H5I_INVALID_HID; H5TS_thread_t threads[NUM_THREAD]; int value, i; + int is_native; herr_t status; /* Must initialize these at runtime */ @@ -108,7 +112,10 @@ tts_error(void) status = H5Pget_vol_id(def_fapl, &vol_id); CHECK(status, FAIL, "H5Pget_vol_id"); - if (vol_id == H5VL_NATIVE) { + is_native = H5VL__is_native_connector_test(vol_id); + CHECK(is_native, FAIL, "H5VL__is_native_connector_test"); + + if (is_native) { /* Create a hdf5 file using H5F_ACC_TRUNC access, default file * creation plist and default file access plist */ @@ -251,9 +258,11 @@ walk_error_callback(unsigned n, const H5E_error2_t *err_desc, void H5_ATTR_UNUSE } void -cleanup_error(void) +cleanup_error(void H5_ATTR_UNUSED *params) { - HDunlink(FILENAME); + if (GetTestCleanup()) { + HDunlink(FILENAME); + } } #endif /*H5_HAVE_THREADSAFE*/ diff --git a/test/ttsafe_error_stacks.c b/test/ttsafe_error_stacks.c new file mode 100644 index 00000000000..29f9698bef0 --- /dev/null +++ b/test/ttsafe_error_stacks.c @@ -0,0 +1,112 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +#include "ttsafe.h" + +#ifdef H5_HAVE_THREADSAFE + +#define ERR_CLS_NAME "Custom error class" +#define ERR_CLS_LIB_NAME "example_lib" +#define ERR_CLS_LIB_VERSION "0.1" + +#define ERR_MAJOR_MSG "Okay, Houston, we've had a problem here" +#define ERR_MINOR_MSG "Oops!" 
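+
+/* The two helpers below each run in a secondary thread: generate_hdf5_error
+ * provokes a library error by calling H5Fget_obj_count() on an invalid file
+ * ID, while generate_user_error registers a custom error class built from the
+ * strings above and pushes a user-defined error onto the default error stack
+ * with H5Epush2(). */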
+ +H5TS_THREAD_RETURN_TYPE generate_hdf5_error(void *arg); +H5TS_THREAD_RETURN_TYPE generate_user_error(void *arg); + +hid_t err_cls_id = H5I_INVALID_HID; + +/* Helper routine to generate an HDF5 library error */ +H5TS_THREAD_RETURN_TYPE +generate_hdf5_error(void H5_ATTR_UNUSED *arg) +{ + H5TS_thread_ret_t ret_value = 0; + ssize_t nobjs = 0; + + H5E_BEGIN_TRY + { + nobjs = H5Fget_obj_count(H5I_INVALID_HID, H5F_OBJ_ALL); + } + H5E_END_TRY + + /* Expect call to fail */ + VERIFY(nobjs, FAIL, "H5Fget_obj_count"); + + return ret_value; +} + +/* Helper routine to generate a user-defined error */ +H5TS_THREAD_RETURN_TYPE +generate_user_error(void H5_ATTR_UNUSED *arg) +{ + H5TS_thread_ret_t ret_value = 0; + hid_t major = H5I_INVALID_HID; + hid_t minor = H5I_INVALID_HID; + herr_t status = FAIL; + + err_cls_id = H5Eregister_class(ERR_CLS_NAME, ERR_CLS_LIB_NAME, ERR_CLS_LIB_VERSION); + CHECK(err_cls_id, H5I_INVALID_HID, "H5Eregister_class"); + + major = H5Ecreate_msg(err_cls_id, H5E_MAJOR, ERR_MAJOR_MSG); + CHECK(major, H5I_INVALID_HID, "H5Ecreate_msg"); + + minor = H5Ecreate_msg(err_cls_id, H5E_MINOR, ERR_MINOR_MSG); + CHECK(minor, H5I_INVALID_HID, "H5Ecreate_msg"); + + status = H5Epush2(H5E_DEFAULT, __FILE__, __func__, __LINE__, err_cls_id, major, minor, "Hello, error\n"); + CHECK(status, FAIL, "H5Epush2"); + + return ret_value; +} + +/* +********************************************************************** +* tts_error_stacks +* +* Test that error stacks with user-defined error classes and messages +* in secondary threads are properly cleaned up at library shutdown time. +********************************************************************** +*/ +void +tts_error_stacks(const void H5_ATTR_UNUSED *params) +{ + H5TS_thread_t threads[2]; + herr_t status = FAIL; + + /* Open library */ + H5open(); + + status = H5TS_thread_create(&threads[0], generate_hdf5_error, NULL); + CHECK(status, FAIL, "H5TS_thread_create"); + + status = H5TS_thread_join(threads[0], NULL); + CHECK(status, FAIL, "H5TS_thread_join"); + + status = H5TS_thread_create(&threads[1], generate_user_error, NULL); + CHECK(status, FAIL, "H5TS_thread_create"); + + status = H5TS_thread_join(threads[1], NULL); + CHECK(status, FAIL, "H5TS_thread_join"); + + if (err_cls_id <= 0) { + TestErrPrintf("Failed to set up user error\n"); + return; + } + + status = H5Eunregister_class(err_cls_id); + CHECK(status, FAIL, "H5Eunregister_class"); + + /* Close library */ + H5close(); +} + +#endif diff --git a/test/ttsafe_rec_rwlock.c b/test/ttsafe_rec_rwlock.c index a09dc6ea9c5..6fafbb4f8c8 100644 --- a/test/ttsafe_rec_rwlock.c +++ b/test/ttsafe_rec_rwlock.c @@ -263,7 +263,7 @@ tts_rec_rwlock_smoke_check_test_thread(void *_udata) ********************************************************************** */ void -tts_rec_rwlock_smoke_check_1(void) +tts_rec_rwlock_smoke_check_1(const void H5_ATTR_UNUSED *params) { herr_t result; #if H5TS_ENABLE_REC_RWLOCK_STATS @@ -546,7 +546,7 @@ tts_rec_rwlock_smoke_check_1(void) ********************************************************************** */ void -tts_rec_rwlock_smoke_check_2(void) +tts_rec_rwlock_smoke_check_2(const void H5_ATTR_UNUSED *params) { herr_t result; int express_test; @@ -752,7 +752,7 @@ tts_rec_rwlock_smoke_check_2(void) ********************************************************************** */ void -tts_rec_rwlock_smoke_check_3(void) +tts_rec_rwlock_smoke_check_3(const void H5_ATTR_UNUSED *params) { herr_t result; int i; @@ -958,7 +958,7 @@ tts_rec_rwlock_smoke_check_3(void) 
********************************************************************** */ void -tts_rec_rwlock_smoke_check_4(void) +tts_rec_rwlock_smoke_check_4(const void H5_ATTR_UNUSED *params) { herr_t result; int i; diff --git a/test/ttsafe_rwlock.c b/test/ttsafe_rwlock.c index d6fcad38bd1..db73fa4e361 100644 --- a/test/ttsafe_rwlock.c +++ b/test/ttsafe_rwlock.c @@ -182,7 +182,7 @@ verify_counting(void *_counter) ********************************************************************** */ void -tts_rwlock(void) +tts_rwlock(const void H5_ATTR_UNUSED *params) { H5TS_thread_t threads[NUM_THREADS]; H5TS_pool_t *pool = NULL; diff --git a/test/ttsafe_semaphore.c b/test/ttsafe_semaphore.c index 563a4f5d3c1..0c9deab1c79 100644 --- a/test/ttsafe_semaphore.c +++ b/test/ttsafe_semaphore.c @@ -229,7 +229,7 @@ tts_semaphore_clientserver(void) ********************************************************************** */ void -tts_semaphore(void) +tts_semaphore(const void H5_ATTR_UNUSED *params) { H5TS_semaphore_t sem; herr_t result; diff --git a/test/ttsafe_thread_id.c b/test/ttsafe_thread_id.c index cd063e05cda..08d9a221966 100644 --- a/test/ttsafe_thread_id.c +++ b/test/ttsafe_thread_id.c @@ -93,7 +93,7 @@ thread_main(void H5_ATTR_UNUSED *arg) ********************************************************************** */ void -tts_thread_id(void) +tts_thread_id(const void H5_ATTR_UNUSED *params) { H5TS_thread_t threads[NTHREADS]; uint64_t tid; diff --git a/test/ttsafe_thread_pool.c b/test/ttsafe_thread_pool.c index e5362111be1..e322bbeffac 100644 --- a/test/ttsafe_thread_pool.c +++ b/test/ttsafe_thread_pool.c @@ -92,7 +92,7 @@ decr_task(void *_counter) ********************************************************************** */ void -tts_thread_pool(void) +tts_thread_pool(const void H5_ATTR_UNUSED *params) { H5TS_pool_t *pool = NULL; herr_t result; diff --git a/test/tunicode.c b/test/tunicode.c index 62ce82da804..0b54281f835 100644 --- a/test/tunicode.c +++ b/test/tunicode.c @@ -802,7 +802,7 @@ dump_string(const char *string) * that string. */ void -test_unicode(void) +test_unicode(const void H5_ATTR_UNUSED *params) { char test_string[MAX_STRING_LENGTH]; unsigned int cur_pos = 0; /* Current position in test_string */ @@ -864,11 +864,13 @@ test_unicode(void) * Delete the file this test created. 
*/ void -cleanup_unicode(void) +cleanup_unicode(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/tvlstr.c b/test/tvlstr.c index 9f41a0570b3..51004de8d08 100644 --- a/test/tvlstr.c +++ b/test/tvlstr.c @@ -968,7 +968,7 @@ test_write_same_element(void) ** ****************************************************************/ void -test_vlstrings(void) +test_vlstrings(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing Variable-Length Strings\n")); @@ -1000,13 +1000,15 @@ test_vlstrings(void) *------------------------------------------------------------------------- */ void -cleanup_vlstrings(void) +cleanup_vlstrings(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(DATAFILE, H5P_DEFAULT); - H5Fdelete(DATAFILE2, H5P_DEFAULT); - H5Fdelete(DATAFILE3, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(DATAFILE, H5P_DEFAULT); + H5Fdelete(DATAFILE2, H5P_DEFAULT); + H5Fdelete(DATAFILE3, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/tvltypes.c b/test/tvltypes.c index 1ca7de3bd83..8277337c27e 100644 --- a/test/tvltypes.c +++ b/test/tvltypes.c @@ -2542,7 +2542,7 @@ test_vltypes_fill_value(void) hsize_t large_dims[] = {SPACE4_DIM_LARGE}; size_t dset_elmts = 0; /* Number of elements in a particular dataset */ const dtype1_struct fill1 = {1, 2, "foobar", "", NULL, "\0", "dead", - 3, 4.0, 100.0, 1.0, "liquid", "meter"}; + 3, 4.0, 100.0, 1.0, "liquid", "meter"}; const dtype1_struct wdata = {3, 4, "", NULL, "\0", "foo", "two", 6, 8.0, 200.0, 2.0, "solid", "yard"}; dtype1_struct *rbuf = NULL; /* Buffer for reading data */ size_t mem_used = 0; /* Memory used during allocation */ @@ -3233,7 +3233,7 @@ test_vltypes_fill_value(void) ** ****************************************************************/ void -test_vltypes(void) +test_vltypes(const void H5_ATTR_UNUSED *params) { /* Output message about test being performed */ MESSAGE(5, ("Testing Variable-Length Datatypes\n")); @@ -3266,11 +3266,13 @@ test_vltypes(void) *------------------------------------------------------------------------- */ void -cleanup_vltypes(void) +cleanup_vltypes(void H5_ATTR_UNUSED *params) { - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME, H5P_DEFAULT); + if (GetTestCleanup()) { + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME, H5P_DEFAULT); + } + H5E_END_TRY } - H5E_END_TRY } diff --git a/test/unlink.c b/test/unlink.c index 42a03ac94d4..914b434bae7 100644 --- a/test/unlink.c +++ b/test/unlink.c @@ -529,7 +529,7 @@ test_filespace(hid_t fapl) char objname[128]; /* Name of object to create */ hsize_t dims[FILESPACE_NDIMS] = {FILESPACE_DIM0, FILESPACE_DIM1, FILESPACE_DIM2}; /* Dataset dimensions */ hsize_t chunk_dims[FILESPACE_NDIMS] = {FILESPACE_CHUNK0, FILESPACE_CHUNK1, - FILESPACE_CHUNK2}; /* Chunk dimensions */ + FILESPACE_CHUNK2}; /* Chunk dimensions */ hsize_t attr_dims[FILESPACE_ATTR_NDIMS] = {FILESPACE_ATTR_DIM0, FILESPACE_ATTR_DIM1}; /* Attribute dimensions */ int *data = NULL; /* Pointer to dataset buffer */ diff --git a/test/vds.c b/test/vds.c index 8dafc5c6890..3d2f41eac05 100644 --- a/test/vds.c +++ b/test/vds.c @@ -522,7 +522,7 @@ test_api(test_api_config_t config, hid_t fapl, H5F_libver_t low) hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ hid_t ex_dcpl = H5I_INVALID_HID; /* Temporary dcpl for examination */ hid_t srcspace[4] = {H5I_INVALID_HID, 
H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source dataspaces */ + H5I_INVALID_HID}; /* Source dataspaces */ hid_t vspace[LIST_DOUBLE_SIZE]; /* Virtual dset dataspaces */ const char *src_file[4] = {"src_file1", "src_file2.", "src_file3..", "src_file4..."}; /* Source file names (different lengths) */ @@ -1188,17 +1188,17 @@ test_vds_prefix_first(unsigned config, hid_t vds_fapl, hid_t src_fapl) char *srcfilenamepct_map = NULL; const char *srcfilenamepct_map_orig = "vds%%%%_src"; hid_t srcfile[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Files with source dsets */ + H5I_INVALID_HID}; /* Files with source dsets */ hid_t vfile = H5I_INVALID_HID; /* File with virtual dset */ hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ hid_t dapl = H5I_INVALID_HID; /* Dataset access property list */ hid_t srcspace[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source dataspaces */ + H5I_INVALID_HID}; /* Source dataspaces */ hid_t vspace[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Virtual dset dataspaces */ + H5I_INVALID_HID}; /* Virtual dset dataspaces */ hid_t memspace = H5I_INVALID_HID; /* Memory dataspace */ hid_t srcdset[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source datasets */ + H5I_INVALID_HID}; /* Source datasets */ hid_t vdset = H5I_INVALID_HID; /* Virtual dataset */ hsize_t dims[4] = {10, 26, 0, 0}; /* Data space current size */ int buf[10][26]; /* Write and expected read buffer */ @@ -1471,17 +1471,17 @@ test_basic_io(unsigned config, hid_t vds_fapl, hid_t src_fapl) char *srcfilenamepct_map = NULL; const char *srcfilenamepct_map_orig = "vds%%%%_src"; hid_t srcfile[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Files with source dsets */ + H5I_INVALID_HID}; /* Files with source dsets */ hid_t vfile = H5I_INVALID_HID; /* File with virtual dset */ hid_t vfile2 = H5I_INVALID_HID; /* File with copied virtual dset */ hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ hid_t srcspace[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source dataspaces */ + H5I_INVALID_HID}; /* Source dataspaces */ hid_t vspace[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Virtual dset dataspaces */ + H5I_INVALID_HID}; /* Virtual dset dataspaces */ hid_t memspace = H5I_INVALID_HID; /* Memory dataspace */ hid_t srcdset[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source datasets */ + H5I_INVALID_HID}; /* Source datasets */ hid_t vdset = H5I_INVALID_HID; /* Virtual dataset */ hsize_t dims[4] = {10, 26, 0, 0}; /* Data space current size */ hsize_t start[4]; /* Hyperslab start */ @@ -4401,19 +4401,19 @@ test_unlim(unsigned config, hid_t vds_fapl, hid_t src_fapl) char srcfilename_map[FILENAME_BUF_SIZE]; char vfilename[FILENAME_BUF_SIZE]; hid_t srcfile[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Files with source dsets */ + H5I_INVALID_HID}; /* Files with source dsets */ hid_t vfile = H5I_INVALID_HID; /* File with virtual dset */ hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ hid_t srcdcpl = H5I_INVALID_HID; /* DCPL for source dset */ hid_t dapl = H5I_INVALID_HID; /* Dataset access property list */ hid_t srcspace[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source dataspaces */ + H5I_INVALID_HID}; /* Source dataspaces */ 
hid_t vspace[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Virtual dset dataspaces */ + H5I_INVALID_HID}; /* Virtual dset dataspaces */ hid_t memspace = H5I_INVALID_HID; /* Memory dataspace */ hid_t filespace = H5I_INVALID_HID; /* File dataspace */ hid_t srcdset[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source datasets */ + H5I_INVALID_HID}; /* Source datasets */ hid_t vdset = H5I_INVALID_HID; /* Virtual dataset */ hsize_t dims[2] = {10, 10}; /* Data space current size */ hsize_t mdims[2] = {10, 20}; /* Data space maximum size */ @@ -7420,7 +7420,7 @@ test_printf(unsigned config, hid_t vds_fapl, hid_t src_fapl) const char *printf_srcfilename_map_orig = "vds_src_%b"; const char *srcfilenamepct_map_orig = "vds%%%%_src"; hid_t srcfile[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Files with source dsets */ + H5I_INVALID_HID}; /* Files with source dsets */ hid_t vfile = H5I_INVALID_HID; /* File with virtual dset */ hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ hid_t dapl = H5I_INVALID_HID; /* Dataset access property list */ @@ -7429,7 +7429,7 @@ test_printf(unsigned config, hid_t vds_fapl, hid_t src_fapl) hid_t memspace = H5I_INVALID_HID; /* Memory dataspace */ hid_t filespace = H5I_INVALID_HID; /* File dataspace */ hid_t srcdset[6] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; /* Source datasets */ + H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; /* Source datasets */ hid_t vdset = H5I_INVALID_HID; /* Virtual dataset */ hsize_t dims[2] = {10, 0}; /* Data space current size */ hsize_t mdims[2] = {10, 20}; /* Data space maximum size */ @@ -11140,7 +11140,7 @@ test_all(unsigned config, hid_t vds_fapl, hid_t src_fapl) hid_t memspace = H5I_INVALID_HID; /* Memory dataspace */ hid_t filespace = H5I_INVALID_HID; /* File dataspace */ hid_t srcdset[5] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source datasets */ + H5I_INVALID_HID}; /* Source datasets */ hid_t vdset = H5I_INVALID_HID; /* Virtual dataset */ hsize_t dims[2] = {6, 6}; /* Data space current size */ hsize_t mdims[2] = {10, 10}; /* Data space maximum size */ diff --git a/test/vds_env.c b/test/vds_env.c index 9e24067d75a..757732adaf9 100644 --- a/test/vds_env.c +++ b/test/vds_env.c @@ -49,17 +49,17 @@ test_vds_prefix_second(unsigned config, hid_t fapl) char *srcfilenamepct = NULL; char *srcfilenamepct_map = NULL; hid_t srcfile[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Files with source dsets */ + H5I_INVALID_HID}; /* Files with source dsets */ hid_t vfile = H5I_INVALID_HID; /* File with virtual dset */ hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ hid_t dapl = H5I_INVALID_HID; /* Dataset access property list */ hid_t srcspace[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source dataspaces */ + H5I_INVALID_HID}; /* Source dataspaces */ hid_t vspace[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Virtual dset dataspaces */ + H5I_INVALID_HID}; /* Virtual dset dataspaces */ hid_t memspace = H5I_INVALID_HID; /* Memory dataspace */ hid_t srcdset[4] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID}; /* Source datasets */ + H5I_INVALID_HID}; /* Source datasets */ hid_t vdset = H5I_INVALID_HID; /* Virtual dataset */ hsize_t dims[4] = {10, 26, 0, 0}; /* Data space 
current size */ int buf[10][26]; /* Write and expected read buffer */ diff --git a/test/vol.c b/test/vol.c index 9fa4f06c1ca..92b64c2532d 100644 --- a/test/vol.c +++ b/test/vol.c @@ -1727,7 +1727,7 @@ exercise_reg_opt_oper(hid_t fake_vol_id, hid_t reg_opt_vol_id, H5VL_subclass_t s H5CX_push(); /* Create fake object on fake VOL connector */ - if (H5I_INVALID_HID == (obj_id = H5VL_register_using_vol_id(id_type, &fake_obj, fake_vol_id, true))) + if (H5I_INVALID_HID == (obj_id = H5VL__register_using_vol_id_test(id_type, &fake_obj, fake_vol_id))) TEST_ERROR; /* Pop the API context off the stack */ @@ -1783,7 +1783,7 @@ exercise_reg_opt_oper(hid_t fake_vol_id, hid_t reg_opt_vol_id, H5VL_subclass_t s H5CX_push(); /* Create fake object on reg_opt VOL connector */ - if (H5I_INVALID_HID == (obj_id = H5VL_register_using_vol_id(id_type, &fake_obj, reg_opt_vol_id, true))) + if (H5I_INVALID_HID == (obj_id = H5VL__register_using_vol_id_test(id_type, &fake_obj, reg_opt_vol_id))) TEST_ERROR; /* Pop the API context off the stack */ @@ -2209,12 +2209,12 @@ test_vol_cap_flags(void) /* If using the native VOL by default, check flags again with H5P_DEFAULT */ vol_env = getenv(HDF5_VOL_CONNECTOR); if (!vol_env || (0 == strcmp(vol_env, "native"))) { - H5VL_class_t *cls; - hid_t connector_id; + H5VL_connector_t *connector; + hid_t connector_id; if (H5Pget_vol_id(H5P_DEFAULT, &connector_id) < 0) TEST_ERROR; - if (NULL == (cls = H5I_object(connector_id))) + if (NULL == (connector = H5I_object(connector_id))) TEST_ERROR; vol_cap_flags_g = H5VL_CAP_FLAG_NONE; @@ -2222,7 +2222,7 @@ test_vol_cap_flags(void) if (H5Pget_vol_cap_flags(H5P_DEFAULT, &vol_cap_flags_g) < 0) TEST_ERROR; - if (vol_cap_flags_g != cls->cap_flags) + if (vol_cap_flags_g != connector->cls->cap_flags) TEST_ERROR; if (H5VLclose(connector_id) < 0) diff --git a/test/vol_plugin.c b/test/vol_plugin.c index 8ebc050c3b9..414f18c6677 100644 --- a/test/vol_plugin.c +++ b/test/vol_plugin.c @@ -257,6 +257,7 @@ test_getters(void) htri_t is_registered = FAIL; hid_t vol_id = H5I_INVALID_HID; hid_t vol_id_out = H5I_INVALID_HID; + int cmp_value; /* Comparison value */ TESTING("VOL getters"); @@ -273,7 +274,10 @@ test_getters(void) /* Get the connector's ID by name */ if ((vol_id_out = H5VLget_connector_id_by_name(NULL_VOL_CONNECTOR_NAME)) < 0) TEST_ERROR; - if (vol_id != vol_id_out) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, vol_id, vol_id_out) < 0) + TEST_ERROR; + if (cmp_value) FAIL_PUTS_ERROR("VOL connector IDs don't match"); if (H5VLclose(vol_id_out) < 0) TEST_ERROR; @@ -295,7 +299,10 @@ test_getters(void) /* Get the connector's ID by value */ if ((vol_id_out = H5VLget_connector_id_by_value(NULL_VOL_CONNECTOR_VALUE)) < 0) TEST_ERROR; - if (vol_id != vol_id_out) + cmp_value = 0; + if (H5VLcmp_connector_cls(&cmp_value, vol_id, vol_id_out) < 0) + TEST_ERROR; + if (cmp_value) FAIL_PUTS_ERROR("VOL connector IDs don't match"); if (H5VLclose(vol_id_out) < 0) TEST_ERROR; diff --git a/testpar/API/H5_api_async_test_parallel.c b/testpar/API/H5_api_async_test_parallel.c index 768bbc2ab07..e8a792702fb 100644 --- a/testpar/API/H5_api_async_test_parallel.c +++ b/testpar/API/H5_api_async_test_parallel.c @@ -434,7 +434,7 @@ test_multi_dataset_io(void) hid_t file_id = H5I_INVALID_HID; hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id[MULTI_DATASET_IO_TEST_NDSETS] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID, H5I_INVALID_HID}; + H5I_INVALID_HID, H5I_INVALID_HID}; hid_t space_id = H5I_INVALID_HID; hid_t mspace_id = H5I_INVALID_HID; hid_t es_id = 
H5I_INVALID_HID; @@ -758,9 +758,9 @@ test_multi_file_dataset_io(void) size_t i, j, data_size, num_in_progress; hid_t fapl_id = H5I_INVALID_HID; hid_t file_id[MULTI_FILE_DATASET_IO_TEST_NFILES] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID, H5I_INVALID_HID}; + H5I_INVALID_HID, H5I_INVALID_HID}; hid_t dset_id[MULTI_FILE_DATASET_IO_TEST_NFILES] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, - H5I_INVALID_HID, H5I_INVALID_HID}; + H5I_INVALID_HID, H5I_INVALID_HID}; hid_t space_id = H5I_INVALID_HID; hid_t mspace_id = H5I_INVALID_HID; hid_t es_id = H5I_INVALID_HID; diff --git a/testpar/API/H5_api_test_parallel.c b/testpar/API/H5_api_test_parallel.c index 71264af418c..c67f93280fe 100644 --- a/testpar/API/H5_api_test_parallel.c +++ b/testpar/API/H5_api_test_parallel.c @@ -323,6 +323,8 @@ main(int argc, char **argv) INDEPENDENT_OP_ERROR(check_vol_register); } else { + int cmp = 0; + /* * If the connector was successfully registered, check that * the connector ID set on the default FAPL matches the ID @@ -340,7 +342,13 @@ main(int argc, char **argv) INDEPENDENT_OP_ERROR(check_vol_register); } - if (default_con_id != registered_con_id) { + if (H5VLcmp_connector_cls(&cmp, default_con_id, registered_con_id) < 0) { + if (MAINPROCESS) + fprintf(stderr, "Couldn't compare VOL connector classes\n"); + INDEPENDENT_OP_ERROR(check_vol_register); + } + + if (0 != cmp) { if (MAINPROCESS) fprintf(stderr, "VOL connector set on default FAPL didn't match specified VOL connector\n"); diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index 106f79eba1e..35b862e1414 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -1,6 +1,17 @@ cmake_minimum_required (VERSION 3.18) project (HDF5_TEST_PAR C) +################################################################################# +# Define Parallel Test Library Sources +################################################################################# +set (TEST_PAR_LIB_SOURCES + ${HDF5_TEST_PAR_SOURCE_DIR}/testpar.c +) + +set (TEST_PAR_LIB_HEADERS + ${HDF5_TEST_PAR_SOURCE_DIR}/testpar.h +) + #----------------------------------------------------------------------------- # Define Tests #----------------------------------------------------------------------------- @@ -30,6 +41,101 @@ set (HDF5_TESTPAR_COMPILE_DEFS_PRIVATE "$<$:H5_HAVE_TEST_API>" ) +################################################################################# +# Setup build for parallel test library +################################################################################# +if (BUILD_STATIC_LIBS) + add_library (${HDF5_TEST_PAR_LIB_TARGET} STATIC ${TEST_PAR_LIB_SOURCES} ${TEST_PAR_LIB_HEADERS}) + target_include_directories (${HDF5_TEST_PAR_LIB_TARGET} + PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_TEST_SRC_DIR};${HDF5_SRC_BINARY_DIR};${HDF5_TEST_PAR_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" + INTERFACE "$/include>" + ) + target_compile_options(${HDF5_TEST_PAR_LIB_TARGET} PRIVATE "${HDF5_CMAKE_C_FLAGS}") + target_compile_definitions(${HDF5_TEST_PAR_LIB_TARGET} + PRIVATE + "H5_TEST_EXPRESS_LEVEL_DEFAULT=${H5_TEST_EXPRESS_LEVEL_DEFAULT}" + "${HDF5_TEST_COMPILE_DEFS_PRIVATE}" + ) + TARGET_C_PROPERTIES (${HDF5_TEST_PAR_LIB_TARGET} STATIC) + target_link_libraries (${HDF5_TEST_PAR_LIB_TARGET} + PUBLIC ${LINK_LIBS} ${HDF5_LIB_TARGET} + PRIVATE "$<$,$>:ws2_32.lib>" + ) + if (MINGW) + target_link_libraries (${HDF5_TEST_PAR_LIB_TARGET} PRIVATE "wsock32.lib") + endif () + H5_SET_LIB_OPTIONS (${HDF5_TEST_PAR_LIB_TARGET} ${HDF5_TEST_PAR_LIB_NAME} STATIC 0) + 
set_target_properties (${HDF5_TEST_PAR_LIB_TARGET} PROPERTIES FOLDER libraries/test/par) + + if (HDF5_EXPORTED_TARGETS AND HDF5_TEST_API_INSTALL) + INSTALL_TARGET_PDB (${HDF5_TEST_PAR_LIB_TARGET} ${HDF5_INSTALL_LIB_DIR} libraries) + + install ( + TARGETS ${HDF5_TEST_PAR_LIB_TARGET} + EXPORT ${HDF5_EXPORTED_TARGETS} + LIBRARY DESTINATION ${HDF5_INSTALL_LIB_DIR} COMPONENT libraries + ARCHIVE DESTINATION ${HDF5_INSTALL_LIB_DIR} COMPONENT libraries + RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT libraries + FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT libraries + INCLUDES DESTINATION include + ) + endif () +endif () + +if (BUILD_SHARED_LIBS) + add_library (${HDF5_TEST_PAR_LIBSH_TARGET} SHARED ${TEST_PAR_LIB_SOURCES} ${TEST_PAR_LIB_HEADERS}) + target_include_directories (${HDF5_TEST_PAR_LIBSH_TARGET} + PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_TEST_SRC_DIR};${HDF5_SRC_BINARY_DIR};${HDF5_TEST_PAR_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>" + INTERFACE "$/include>" + ) + target_compile_options(${HDF5_TEST_PAR_LIBSH_TARGET} PRIVATE "${HDF5_CMAKE_C_FLAGS}") + target_compile_definitions(${HDF5_TEST_PAR_LIBSH_TARGET} + PUBLIC "H5_BUILT_AS_DYNAMIC_LIB" + PRIVATE + "H5_TEST_EXPRESS_LEVEL_DEFAULT=${H5_TEST_EXPRESS_LEVEL_DEFAULT}" + "${HDF5_TEST_COMPILE_DEFS_PRIVATE}" + ) + TARGET_C_PROPERTIES (${HDF5_TEST_PAR_LIBSH_TARGET} SHARED) + target_link_libraries (${HDF5_TEST_PAR_LIBSH_TARGET} + PUBLIC ${LINK_LIBS} ${HDF5_LIBSH_TARGET} + PRIVATE "$<$,$>:ws2_32.lib>" + ) + if (MINGW) + target_link_libraries (${HDF5_TEST_PAR_LIBSH_TARGET} PRIVATE "wsock32.lib") + endif () + H5_SET_LIB_OPTIONS (${HDF5_TEST_PAR_LIBSH_TARGET} ${HDF5_TEST_PAR_LIB_NAME} SHARED "LIB") + set_target_properties (${HDF5_TEST_PAR_LIBSH_TARGET} PROPERTIES FOLDER libraries/test/par) + + if (HDF5_EXPORTED_TARGETS AND HDF5_TEST_API_INSTALL) + INSTALL_TARGET_PDB (${HDF5_TEST_PAR_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} libraries) + + install ( + TARGETS ${HDF5_TEST_PAR_LIBSH_TARGET} + EXPORT ${HDF5_EXPORTED_TARGETS} + LIBRARY DESTINATION ${HDF5_INSTALL_LIB_DIR} COMPONENT libraries + ARCHIVE DESTINATION ${HDF5_INSTALL_LIB_DIR} COMPONENT libraries + RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT libraries + FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT libraries + INCLUDES DESTINATION include + ) + endif () +endif () + +#----------------------------------------------------------------------------- +# Add Target to clang-format +#----------------------------------------------------------------------------- +if (HDF5_ENABLE_FORMATTERS) + if (BUILD_STATIC_LIBS) + clang_format (HDF5_TEST_PAR_SRC_FORMAT ${HDF5_TEST_PAR_LIB_TARGET}) + else () + clang_format (HDF5_TEST_PAR_SRC_FORMAT ${HDF5_TEST_PAR_LIBSH_TARGET}) + endif () +endif () + +################################################################################# +# Tests +################################################################################# + #-- Adding test for testhdf5 add_executable (testphdf5 ${testphdf5_SOURCES}) target_compile_options(testphdf5 PRIVATE "${HDF5_CMAKE_C_FLAGS}") @@ -40,12 +146,12 @@ target_include_directories (testphdf5 if (NOT BUILD_SHARED_LIBS) TARGET_C_PROPERTIES (testphdf5 STATIC) target_link_libraries (testphdf5 - PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:MPI::MPI_C>" + PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_TEST_PAR_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:MPI::MPI_C>" ) else () TARGET_C_PROPERTIES (testphdf5 SHARED) target_link_libraries (testphdf5 - PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:MPI::MPI_C>" 
+ PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_TEST_PAR_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:MPI::MPI_C>" ) endif () set_target_properties (testphdf5 PROPERTIES FOLDER test/par) @@ -67,13 +173,15 @@ macro (ADD_H5P_EXE file) if (NOT BUILD_SHARED_LIBS) TARGET_C_PROPERTIES (${file} STATIC) target_link_libraries (${file} - PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$:MPI::MPI_C>" + PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_TEST_PAR_LIB_TARGET} ${HDF5_LIB_TARGET} + "$<$:MPI::MPI_C>" "$<$,$>:ws2_32.lib>" ) else () TARGET_C_PROPERTIES (${file} SHARED) target_link_libraries (${file} - PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$:MPI::MPI_C>" + PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_TEST_PAR_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} + "$<$:MPI::MPI_C>" "$<$,$>:ws2_32.lib>" ) endif () diff --git a/testpar/Makefile.am b/testpar/Makefile.am index 4a8cb826f49..27002fa3de3 100644 --- a/testpar/Makefile.am +++ b/testpar/Makefile.am @@ -42,12 +42,17 @@ endif # t_pflush1 and t_pflush2 are used by testpflush.sh check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2 +# The libh5testpar library provides common support code for the tests. +noinst_LTLIBRARIES=libh5testpar.la + +libh5testpar_la_SOURCES=testpar.c + testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \ t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \ t_prop.c t_coll_md.c t_oflush.c -# The tests all depend on the hdf5 library and the test library -LDADD = $(LIBH5TEST) $(LIBHDF5) +# The tests all depend on the hdf5 library and the test libraries +LDADD = $(LIBH5TEST) libh5testpar.la $(LIBHDF5) # Temporary files # MPItest.h5 is from t_mpi diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c index 8674e35369e..3a5a63d11cc 100644 --- a/testpar/t_2Gio.c +++ b/testpar/t_2Gio.c @@ -27,7 +27,10 @@ #include #include "hdf5.h" -#include "testphdf5.h" +#include "testpar.h" + +/* Include testing framework functionality */ +#include "testframe.h" #include "mpi.h" @@ -54,10 +57,21 @@ #define BIG_Z_FACTOR 2048 #endif +#define DATASETNAME1 "Data1" +#define DATASETNAME2 "Data2" +#define DATASETNAME3 "Data3" +#define DATASETNAME4 "Data4" +#define DATASETNAME7 "Data7" +#define DATASETNAME8 "Data8" +#define DATASETNAME9 "Data9" + #ifndef PATH_MAX #define PATH_MAX 512 #endif /* !PATH_MAX */ +/* Dataset data type. Int's can be easily octo dumped. */ +typedef int DATATYPE; + /* global variables */ int dim0; int dim1; @@ -81,6 +95,11 @@ char *filenames[NFILENAME]; hid_t fapl; /* file access property list */ MPI_Comm test_comm = MPI_COMM_WORLD; +/* Structure for passing test parameters around */ +typedef struct test_params_t { + char *filename; +} test_params_t; + // static int enable_error_stack = 0; /* enable error stack; disable=0 enable=1 */ // static const char *TestProgName = NULL; // static void (*TestPrivateUsage)(void) = NULL; @@ -94,20 +113,20 @@ MPI_Comm test_comm = MPI_COMM_WORLD; * Show command usage */ static void -usage(void) +usage(FILE *stream) { - printf(" [-r] [-w] [-m] [-n] " - "[-o] [-f ] [-d ]\n"); - printf("\t-m" - "\tset number of datasets for the multiple dataset test\n"); - printf("\t-n" - "\tset number of groups for the multiple group test\n"); - printf("\t-f \tfilename prefix\n"); - printf("\t-2\t\tuse Split-file together with MPIO\n"); - printf("\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", BIG_X_FACTOR, - BIG_Y_FACTOR); - printf("\t-c \tdataset chunk dimensions. 
Defaults (dim0/10,dim1/10)\n"); - printf("\n"); + fprintf(stream, " [-r] [-w] [-m] [-n] " + "[-o] [-f ] [-d ]\n"); + fprintf(stream, "\t-m" + "\tset number of datasets for the multiple dataset test\n"); + fprintf(stream, "\t-n" + "\tset number of groups for the multiple group test\n"); + fprintf(stream, "\t-f \tfilename prefix\n"); + fprintf(stream, "\t-2\t\tuse Split-file together with MPIO\n"); + fprintf(stream, "\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", BIG_X_FACTOR, + BIG_Y_FACTOR); + fprintf(stream, "\t-c \tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); + fprintf(stream, "\n"); } /* @@ -239,59 +258,6 @@ parse_options(int argc, char **argv) return (0); } -/* - * Create the appropriate File access property list - */ -hid_t -create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) -{ - hid_t ret_pl = H5I_INVALID_HID; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ - - /* need the rank for error checking macros */ - MPI_Comm_rank(test_comm, &mpi_rank); - - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); - - if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO) { - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), ""); - ret = H5Pset_all_coll_metadata_ops(ret_pl, true); - VRFY((ret >= 0), ""); - ret = H5Pset_coll_metadata_write(ret_pl, true); - VRFY((ret >= 0), ""); - return (ret_pl); - } - - if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { - hid_t mpio_pl; - - mpio_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return (ret_pl); - } - - /* unknown file access types */ - return (ret_pl); -} - /* * Setup the dimensions of the hyperslab. * Two modes--by rows or by columns. @@ -383,54 +349,6 @@ slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t s } } -/* - * Setup the coordinates for point selection. 
- */ -void -point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, - hsize_t coords[], int order) -{ - hsize_t i, j, k = 0, m, n, s1, s2; - - // HDcompile_assert(MAX_RANK == 3); - HDcompile_assert(MAX_RANK == 2); - - if (OUT_OF_ORDER == order) - k = (num_points * MAX_RANK) - 1; - else if (IN_ORDER == order) - k = 0; - - s1 = start[0]; - s2 = start[1]; - - for (i = 0; i < count[0]; i++) - for (j = 0; j < count[1]; j++) - for (m = 0; m < block[0]; m++) - for (n = 0; n < block[1]; n++) - if (OUT_OF_ORDER == order) { - coords[k--] = s2 + (stride[1] * j) + n; - coords[k--] = s1 + (stride[0] * i) + m; - } - else if (IN_ORDER == order) { - coords[k++] = s1 + stride[0] * i + m; - coords[k++] = s2 + stride[1] * j + n; - } - - if (VERBOSE_MED) { - printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " - "datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], - (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], - (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0] * block[1] * count[0] * count[1])); - k = 0; - for (i = 0; i < num_points; i++) { - printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); - k += 2; - } - } -} - /* * Fill the dataset with trivial data for testing. * Assume dimension rank is 2 and data is stored contiguous. @@ -479,7 +397,7 @@ dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset) /* * Print the content of the dataset. */ -int +static int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, DATATYPE *original) { @@ -700,8 +618,8 @@ MpioTest2G(MPI_Comm comm) * dataset. */ -void -dataset_writeInd(void) +static void +dataset_writeInd(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -727,7 +645,7 @@ dataset_writeInd(void) MPI_Comm comm = test_comm; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const test_params_t *)params)->filename; if (VERBOSE_MED) printf("Independent write test on file %s\n", filename); @@ -845,8 +763,8 @@ dataset_writeInd(void) } /* Example of using the parallel HDF5 library to read a dataset */ -void -dataset_readInd(void) +static void +dataset_readInd(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -867,7 +785,7 @@ dataset_readInd(void) MPI_Comm comm = test_comm; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const test_params_t *)params)->filename; if (VERBOSE_MED) printf("Independent read test on file %s\n", filename); @@ -967,8 +885,8 @@ dataset_readInd(void) * each process controls a hyperslab within.] 
*/ -void -dataset_writeAll(void) +static void +dataset_writeAll(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -1000,7 +918,7 @@ dataset_writeAll(void) MPI_Comm comm = test_comm; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const test_params_t *)params)->filename; if (VERBOSE_MED) printf("Collective write test on file %s\n", filename); @@ -1348,6 +1266,22 @@ dataset_writeAll(void) /* Dataset5: point selection in File - Hyperslab selection in Memory*/ /* create a file dataspace independently */ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + if (VERBOSE_MED) { + hsize_t k = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t i = 0; i < num_points; i++) { + printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += MAX_RANK; + } + } + file_dataspace = H5Dget_space(dataset5); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1384,6 +1318,22 @@ dataset_writeAll(void) start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank; start[1] = 0; point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + if (VERBOSE_MED) { + hsize_t k = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t i = 0; i < num_points; i++) { + printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += MAX_RANK; + } + } + file_dataspace = H5Dget_space(dataset6); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1392,6 +1342,22 @@ dataset_writeAll(void) start[0] = 0; start[1] = 0; point_set(start, count, stride, block, num_points, coords, IN_ORDER); + if (VERBOSE_MED) { + hsize_t k = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t i = 0; i < num_points; i++) { + printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += MAX_RANK; + } + } + mem_dataspace = H5Dget_space(dataset6); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1421,6 +1387,22 @@ dataset_writeAll(void) start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank; start[1] = 0; point_set(start, count, stride, block, num_points, coords, IN_ORDER); + if (VERBOSE_MED) { + hsize_t k = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + 
"datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t i = 0; i < num_points; i++) { + printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += MAX_RANK; + } + } + file_dataspace = H5Dget_space(dataset7); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1489,8 +1471,8 @@ dataset_writeAll(void) * each process controls a hyperslab within.] */ -void -dataset_readAll(void) +static void +dataset_readAll(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -1516,7 +1498,7 @@ dataset_readAll(void) MPI_Comm comm = test_comm; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const test_params_t *)params)->filename; if (VERBOSE_MED) printf("Collective read test on file %s\n", filename); @@ -1740,6 +1722,22 @@ dataset_readAll(void) start[0] = 0; start[1] = 0; point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + if (VERBOSE_MED) { + hsize_t idx = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t point = 0; point < num_points; point++) { + printf("(%d, %d)\n", (int)coords[idx], (int)coords[idx + 1]); + idx += MAX_RANK; + } + } + mem_dataspace = H5Dget_space(dataset5); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1778,6 +1776,22 @@ dataset_readAll(void) start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank; start[1] = 0; point_set(start, count, stride, block, num_points, coords, IN_ORDER); + if (VERBOSE_MED) { + hsize_t idx = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t point = 0; point < num_points; point++) { + printf("(%d, %d)\n", (int)coords[idx], (int)coords[idx + 1]); + idx += MAX_RANK; + } + } + file_dataspace = H5Dget_space(dataset6); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1786,6 +1800,22 @@ dataset_readAll(void) start[0] = 0; start[1] = 0; point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + if (VERBOSE_MED) { + hsize_t idx = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for 
(size_t point = 0; point < num_points; point++) { + printf("(%d, %d)\n", (int)coords[idx], (int)coords[idx + 1]); + idx += MAX_RANK; + } + } + mem_dataspace = H5Dget_space(dataset6); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1892,324 +1922,96 @@ dataset_readAll(void) } /* - * Part 2--Independent read/write for extendible datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two extendible - * datasets in one HDF5 file with independent parallel MPIO access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. + * Example of using the parallel HDF5 library to create an extendable dataset + * and perform I/O on it in a way that verifies that the chunk cache is + * bypassed for parallel I/O. */ -void -extend_writeInd(void) +static void +extend_writeInd2(const void *params) { - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ const char *filename; - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - hsize_t max_dims[MAX_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */ - hid_t dataset_pl; /* dataset create prop. list */ - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK]; /* for hyperslab setting */ - hsize_t stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; + hid_t fid; /* HDF5 file ID */ + hid_t fapl_id; /* File access templates */ + hid_t fs; /* File dataspace ID */ + hid_t ms; /* Memory dataspace ID */ + hid_t dataset; /* Dataset ID */ + hsize_t orig_size = 10; /* Original dataset dim size */ + hsize_t new_size = 20; /* Extended dataset dim size */ + hsize_t one = 1; + hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */ + hsize_t chunk_size = 16384; /* chunk size */ + hid_t dcpl; /* dataset create prop. list */ + int written[10], /* Data to write */ + retrieved[10]; /* Data read in */ + int mpi_size, mpi_rank; /* MPI settings */ + int i; /* Local index variable */ + herr_t ret; /* Generic return value */ - filename = GetTestParameters(); + filename = ((const test_params_t *)params)->filename; if (VERBOSE_MED) - printf("Extend independent write test on file %s\n", filename); + printf("Extend independent write test #2 on file %s\n", filename); /* set up MPI parameters */ MPI_Comm_size(test_comm, &mpi_size); MPI_Comm_rank(test_comm, &mpi_rank); - /* setup chunk-size. 
Make sure sizes are > 0 */ - chunk_dims[0] = (hsize_t)chunkdim0; - chunk_dims[1] = (hsize_t)chunkdim1; - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - /* ------------------- * START AN HDF5 FILE * -------------------*/ /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* Reduce the number of metadata cache slots, so that there are cache - * collisions during the raw data I/O on the chunked dataset. This stresses - * the metadata cache and tests for cache bugs. -QAK - */ - { - int mdc_nelmts; - size_t rdcc_nelmts; - size_t rdcc_nbytes; - double rdcc_w0; - - ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); - VRFY((ret >= 0), "H5Pget_cache succeeded"); - mdc_nelmts = 4; - ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); - VRFY((ret >= 0), "H5Pset_cache succeeded"); - } + fapl_id = create_faccess_plist(test_comm, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); VRFY((fid >= 0), "H5Fcreate succeeded"); /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); /* -------------------------------------------------------------- * Define the dimensions of the overall datasets and create them. * ------------------------------------------------------------- */ /* set up dataset storage chunk sizes and creation property list */ - if (VERBOSE_MED) - printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); - dataset_pl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims); + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_chunk(dcpl, 1, &chunk_size); VRFY((ret >= 0), "H5Pset_chunk succeeded"); /* setup dimensionality object */ - /* start out with no rows, extend it later. 
*/ - dims[0] = dims[1] = 0; - sid = H5Screate_simple(MAX_RANK, dims, max_dims); - VRFY((sid >= 0), "H5Screate_simple succeeded"); + fs = H5Screate_simple(1, &orig_size, &max_size); + VRFY((fs >= 0), "H5Screate_simple succeeded"); /* create an extendible dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another extendible dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreat2e succeeded"); /* release resource */ - H5Sclose(sid); - H5Pclose(dataset_pl); + ret = H5Pclose(dcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); /* ------------------------- - * Test writing to dataset1 + * Test writing to dataset * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + /* create a memory dataspace independently */ + ms = H5Screate_simple(1, &orig_size, &max_size); + VRFY((ms >= 0), "H5Screate_simple succeeded"); /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); + for (i = 0; i < (int)orig_size; i++) + written[i] = i; + MESG("data array initialized"); if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); + MESG("writing at offset zero: "); + for (i = 0; i < (int)orig_size; i++) + printf("%s%d", i ? ", " : "", written[i]); + printf("\n"); } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Extend its current dim sizes before writing */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - - /* ------------------------- - * Test writing to dataset2 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Try write to dataset2 beyond its current dim sizes. Should fail. 
*/ - /* Temporary turn off auto error reporting */ - H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently. Should fail. */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret < 0), "H5Dwrite failed as expected"); - - /* restore auto error reporting */ - H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); - H5Sclose(file_dataspace); - - /* Extend dataset2 and try again. Should succeed. */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - ret = H5Dset_extent(dataset2, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - free(data_array1); -} - -/* - * Example of using the parallel HDF5 library to create an extendable dataset - * and perform I/O on it in a way that verifies that the chunk cache is - * bypassed for parallel I/O. - */ - -void -extend_writeInd2(void) -{ - const char *filename; - hid_t fid; /* HDF5 file ID */ - hid_t fapl_id; /* File access templates */ - hid_t fs; /* File dataspace ID */ - hid_t ms; /* Memory dataspace ID */ - hid_t dataset; /* Dataset ID */ - hsize_t orig_size = 10; /* Original dataset dim size */ - hsize_t new_size = 20; /* Extended dataset dim size */ - hsize_t one = 1; - hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */ - hsize_t chunk_size = 16384; /* chunk size */ - hid_t dcpl; /* dataset create prop. 
list */ - int written[10], /* Data to write */ - retrieved[10]; /* Data read in */ - int mpi_size, mpi_rank; /* MPI settings */ - int i; /* Local index variable */ - herr_t ret; /* Generic return value */ - - filename = GetTestParameters(); - if (VERBOSE_MED) - printf("Extend independent write test #2 on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - fapl_id = create_faccess_plist(test_comm, MPI_INFO_NULL, facc_type); - VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(fapl_id); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. - * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dcpl, 1, &chunk_size); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - fs = H5Screate_simple(1, &orig_size, &max_size); - VRFY((fs >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreat2e succeeded"); - - /* release resource */ - ret = H5Pclose(dcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* ------------------------- - * Test writing to dataset - * -------------------------*/ - /* create a memory dataspace independently */ - ms = H5Screate_simple(1, &orig_size, &max_size); - VRFY((ms >= 0), "H5Screate_simple succeeded"); - - /* put some trivial data in the data_array */ - for (i = 0; i < (int)orig_size; i++) - written[i] = i; - MESG("data array initialized"); - if (VERBOSE_MED) { - MESG("writing at offset zero: "); - for (i = 0; i < (int)orig_size; i++) - printf("%s%d", i ? ", " : "", written[i]); - printf("\n"); - } - ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written); - VRFY((ret >= 0), "H5Dwrite succeeded"); + ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written); + VRFY((ret >= 0), "H5Dwrite succeeded"); /* ------------------------- * Read initial data from dataset. 
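The hunks in this part of the diff convert the parallel test entry points from "void test(void)" plus GetTestParameters() to "static void test(const void *params)", where each test casts the opaque pointer to test_params_t and reads its filename member. A minimal standalone sketch of that calling convention follows; it is an illustration only and assumes nothing beyond the filename member these hunks actually dereference (the full test_params_t definition and the driver that supplies the pointer are outside this diff, and the file name below is made up).

#include <stdio.h>

/* Stand-in for the real test_params_t; these hunks only ever read a
 * 'filename' member through the cast, so that is all the sketch assumes. */
typedef struct test_params_t {
    const char *filename;
} test_params_t;

/* New-style test entry point: the driver passes an opaque pointer and the
 * test recovers its parameters with a cast instead of calling
 * GetTestParameters(). */
static void
dataset_writeInd_sketch(const void *params)
{
    const char *filename = ((const test_params_t *)params)->filename;

    printf("Independent write test on file %s\n", filename);
    /* ... create a parallel FAPL, open the file, perform the write ... */
}

int
main(void)
{
    test_params_t params = {"ParaTest.h5"}; /* illustrative name only */

    /* The real test framework supplies the pointer; calling directly here
     * just demonstrates the cast. */
    dataset_writeInd_sketch(&params);
    return 0;
}

Keeping the parameter opaque in the signature lets every test share a single function-pointer type, while each test casts to whatever parameter struct it expects.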
@@ -2279,617 +2081,8 @@ extend_writeInd2(void) VRFY((ret >= 0), "H5Dclose succeeded"); /* Close the file collectively */ - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); -} - -/* Example of using the parallel HDF5 library to read an extendible dataset */ -void -extend_readInd(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_array2 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - const char *filename; - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if (VERBOSE_MED) - printf("Extend independent read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - data_array2 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array2 != NULL), "data_array2 malloc succeeded"); - data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); - VRFY((fid >= 0), ""); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), ""); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset2 >= 0), ""); - - /* Try extend dataset1 which is open RDONLY. Should fail. 
*/ - /* first turn off auto error reporting */ - H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); - VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); - dims[0]++; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret < 0), "H5Dset_extent failed as expected"); - - /* restore auto error reporting */ - H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); - H5Sclose(file_dataspace); - - /* Read dataset1 using BYROW pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset1 read verified correct"); - if (ret) - nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - - /* Read dataset2 using BYCOL pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset2 read verified correct"); - if (ret) - nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), ""); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), ""); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - free(data_array1); - if (data_array2) - free(data_array2); - if (data_origin1) - free(data_origin1); -} - -/* - * Part 3--Collective read/write for extendible datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two extendible - * datasets in one HDF5 file with collective parallel MPIO access support. 
- * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. - */ - -void -extend_writeAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - hsize_t max_dims[MAX_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */ - hid_t dataset_pl; /* dataset create prop. list */ - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK]; /* for hyperslab setting */ - hsize_t stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if (VERBOSE_MED) - printf("Extend independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - /* setup chunk-size. Make sure sizes are > 0 */ - chunk_dims[0] = (hsize_t)chunkdim0; - chunk_dims[1] = (hsize_t)chunkdim1; - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* Reduce the number of metadata cache slots, so that there are cache - * collisions during the raw data I/O on the chunked dataset. This stresses - * the metadata cache and tests for cache bugs. -QAK - */ - { - int mdc_nelmts; - size_t rdcc_nelmts; - size_t rdcc_nbytes; - double rdcc_w0; - - ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); - VRFY((ret >= 0), "H5Pget_cache succeeded"); - mdc_nelmts = 4; - ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); - VRFY((ret >= 0), "H5Pset_cache succeeded"); - } - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. - * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - if (VERBOSE_MED) - printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); - dataset_pl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - /* start out with no rows, extend it later. 
*/ - dims[0] = dims[1] = 0; - sid = H5Screate_simple(MAX_RANK, dims, max_dims); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another extendible dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* release resource */ - H5Sclose(sid); - H5Pclose(dataset_pl); - - /* ------------------------- - * Test writing to dataset1 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Extend its current dim sizes before writing */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* write data collectively */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* ------------------------- - * Test writing to dataset2 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* Try write to dataset2 beyond its current dim sizes. Should fail. 
*/ - /* Temporary turn off auto error reporting */ - H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently. Should fail. */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret < 0), "H5Dwrite failed as expected"); - - /* restore auto error reporting */ - H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); - H5Sclose(file_dataspace); - - /* Extend dataset2 and try again. Should succeed. */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - ret = H5Dset_extent(dataset2, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Pclose(xfer_plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - free(data_array1); -} - -/* Example of using the parallel HDF5 library to read an extendible dataset */ -void -extend_readAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_array2 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if (VERBOSE_MED) - printf("Extend independent read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - data_array2 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array2 != NULL), "data_array2 malloc succeeded"); - data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * 
sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); - VRFY((fid >= 0), ""); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), ""); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset2 >= 0), ""); - - /* Try extend dataset1 which is open RDONLY. Should fail. */ - /* first turn off auto error reporting */ - H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); - VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); - dims[0]++; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret < 0), "H5Dset_extent failed as expected"); - - /* restore auto error reporting */ - H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); - H5Sclose(file_dataspace); - - /* Read dataset1 using BYROW pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset1 read verified correct"); - if (ret) - nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - H5Pclose(xfer_plist); - - /* Read dataset2 using BYCOL pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = 
H5Screate_simple(MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset2 read verified correct"); - if (ret) - nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - H5Pclose(xfer_plist); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), ""); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), ""); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - free(data_array1); - if (data_array2) - free(data_array2); - if (data_origin1) - free(data_origin1); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); } /* @@ -2897,8 +2090,8 @@ extend_readAll(void) * dataset in an HDF5 file with collective parallel access support. */ #ifdef H5_HAVE_FILTER_DEFLATE -void -compress_readAll(void) +static void +compress_readAll(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -2919,7 +2112,7 @@ compress_readAll(void) int mpi_size, mpi_rank; herr_t ret; /* Generic return value */ - filename = GetTestParameters(); + filename = ((const test_params_t *)params)->filename; if (VERBOSE_MED) printf("Collective chunked dataset read test on file %s\n", filename); @@ -3082,8 +2275,8 @@ compress_readAll(void) * dataset with the exception that one processor selects no element. */ -void -none_selection_chunk(void) +static void +none_selection_chunk(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -3111,7 +2304,7 @@ none_selection_chunk(void) MPI_Comm comm = test_comm; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const test_params_t *)params)->filename; if (VERBOSE_MED) printf("Extend independent write test on file %s\n", filename); @@ -3281,992 +2474,13 @@ none_selection_chunk(void) free(data_array); } -/* Function: test_actual_io_mode - * - * Purpose: tests one specific case of collective I/O and checks that the - * actual_chunk_opt_mode property and the actual_io_mode - * properties in the DXPL have the correct values. - * - * Input: selection_mode: changes the way processes select data from the space, as well - * as some dxpl flags to get collective I/O to break in different ways. 
- * - * The relevant I/O function and expected response for each mode: - * TEST_ACTUAL_IO_MULTI_CHUNK_IND: - * H5D_mpi_chunk_collective_io, each process reports independent I/O - * - * TEST_ACTUAL_IO_MULTI_CHUNK_COL: - * H5D_mpi_chunk_collective_io, each process reports collective I/O - * - * TEST_ACTUAL_IO_MULTI_CHUNK_MIX: - * H5D_mpi_chunk_collective_io, each process reports mixed I/O - * - * TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: - * H5D_mpi_chunk_collective_io, processes disagree. The root reports - * collective, the rest report independent I/O - * - * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: - * Same test TEST_ACTUAL_IO_MULTI_CHUNK_IND. - * Set directly go to multi-chunk-io without num threshold calc. - * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL: - * Same test TEST_ACTUAL_IO_MULTI_CHUNK_COL. - * Set directly go to multi-chunk-io without num threshold calc. - * - * TEST_ACTUAL_IO_LINK_CHUNK: - * H5D_link_chunk_collective_io, processes report linked chunk I/O - * - * TEST_ACTUAL_IO_CONTIGUOUS: - * H5D__contig_collective_write or H5D__contig_collective_read - * each process reports contiguous collective I/O - * - * TEST_ACTUAL_IO_NO_COLLECTIVE: - * Simple independent I/O. This tests that the defaults are properly set. - * - * TEST_ACTUAL_IO_RESET: - * Performs collective and then independent I/O with the same dxpl to - * make sure the property is correctly reset to the default on each use. - * Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE - * (The most complex case that works on all builds) and then performs - * an independent read and write with the same dxpls. - * - * Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE - * is not needed as they are covered by DIRECT_CHUNK_MIX and - * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing - * path way to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold. 
- */ -static void -test_actual_io_mode(int selection_mode) -{ - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE; - H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE; - H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; - const char *filename; - const char *test_name; - bool direct_multi_chunk_io; - bool multi_chunk_io; - bool is_chunked; - bool is_collective; - int mpi_size = -1; - int mpi_rank = -1; - int length; - int *buffer; - int i; - MPI_Comm mpi_comm = MPI_COMM_NULL; - MPI_Info mpi_info = MPI_INFO_NULL; - hid_t fid = H5I_INVALID_HID; - hid_t sid = H5I_INVALID_HID; - hid_t dataset = H5I_INVALID_HID; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl_id = H5I_INVALID_HID; - hid_t mem_space = H5I_INVALID_HID; - hid_t file_space = H5I_INVALID_HID; - hid_t dcpl = H5I_INVALID_HID; - hid_t dxpl_write = H5I_INVALID_HID; - hid_t dxpl_read = H5I_INVALID_HID; - hsize_t dims[MAX_RANK]; - hsize_t chunk_dims[MAX_RANK]; - hsize_t start[MAX_RANK]; - hsize_t stride[MAX_RANK]; - hsize_t count[MAX_RANK]; - hsize_t block[MAX_RANK]; - char message[256]; - herr_t ret; - - /* Set up some flags to make some future if statements slightly more readable */ - direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND || - selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); - - /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then - * tests independent I/O - */ - multi_chunk_io = - (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET); - - is_chunked = - (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE); - - is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE; - - /* Set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - MPI_Barrier(test_comm); - - assert(mpi_size >= 1); - - mpi_comm = test_comm; - mpi_info = MPI_INFO_NULL; - - filename = (const char *)GetTestParameters(); - assert(filename != NULL); - - /* Setup the file access template */ - fapl_id = create_faccess_plist(mpi_comm, mpi_info, facc_type); - VRFY((fapl_id >= 0), "create_faccess_plist() succeeded"); - - /* Create the file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Create the basic Space */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - sid = H5Screate_simple(MAX_RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* Create the dataset creation plist */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation plist created successfully"); - - /* If we are not testing contiguous datasets */ - if (is_chunked) { - /* Set up chunk information. 
*/ - chunk_dims[0] = dims[0] / (hsize_t)mpi_size; - chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0), "chunk creation property list succeeded"); - } - - /* Create the dataset */ - dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); - - /* Create the file dataspace */ - file_space = H5Dget_space(dataset); - VRFY((file_space >= 0), "H5Dget_space succeeded"); - - /* Choose a selection method based on the type of I/O we want to occur, - * and also set up some selection-dependeent test info. */ - switch (selection_mode) { - - /* Independent I/O with optimization */ - case TEST_ACTUAL_IO_MULTI_CHUNK_IND: - case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: - /* Since the dataset is chunked by row and each process selects a row, - * each process writes to a different chunk. This forces all I/O to be - * independent. - */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Multi Chunk - Independent"; - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - break; - - /* Collective I/O with optimization */ - case TEST_ACTUAL_IO_MULTI_CHUNK_COL: - case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL: - /* The dataset is chunked by rows, so each process takes a column which - * spans all chunks. Since the processes write non-overlapping regular - * selections to each chunk, the operation is purely collective. - */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - test_name = "Multi Chunk - Collective"; - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - if (mpi_size > 1) - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; - else - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - break; - - /* Mixed I/O with optimization */ - case TEST_ACTUAL_IO_MULTI_CHUNK_MIX: - /* A chunk will be assigned collective I/O only if it is selected by each - * process. To get mixed I/O, have the root select all chunks and each - * subsequent process select the first and nth chunk. The first chunk, - * accessed by all, will be assigned collective I/O while each other chunk - * will be accessed only by the root and the nth process and will be - * assigned independent I/O. Each process will access one chunk collectively - * and at least one chunk independently, reporting mixed I/O. - */ - - if (mpi_rank == 0) { - /* Select the first column */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - } - else { - /* Select the first and the nth chunk in the nth column */ - block[0] = (hsize_t)(dim0 / mpi_size); - block[1] = (hsize_t)(dim1 / mpi_size); - count[0] = 2; - count[1] = 1; - stride[0] = (hsize_t)mpi_rank * block[0]; - stride[1] = 1; - start[0] = 0; - start[1] = (hsize_t)mpi_rank * block[1]; - } - - test_name = "Multi Chunk - Mixed"; - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; - break; - - /* RESET tests that the properties are properly reset to defaults each time I/O is - * performed. To achieve this, we have RESET perform collective I/O (which would change - * the values from the defaults) followed by independent I/O (which should report the - * default values). RESET doesn't need to have a unique selection, so we reuse - * MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works - * on all builds. The independent section of RESET can be found at the end of this function. 
- */ - case TEST_ACTUAL_IO_RESET: - - /* Mixed I/O with optimization and internal disagreement */ - case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: - /* A chunk will be assigned collective I/O only if it is selected by each - * process. To get mixed I/O with disagreement, assign process n to the - * first chunk and the nth chunk. The first chunk, selected by all, is - * assgigned collective I/O, while each other process gets independent I/O. - * Since the root process with only access the first chunk, it will report - * collective I/O. The subsequent processes will access the first chunk - * collectively, and their other chunk independently, reporting mixed I/O. - */ - - if (mpi_rank == 0) { - /* Select the first chunk in the first column */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - block[0] = block[0] / (hsize_t)mpi_size; - } - else { - /* Select the first and the nth chunk in the nth column */ - block[0] = (hsize_t)(dim0 / mpi_size); - block[1] = (hsize_t)(dim1 / mpi_size); - count[0] = 2; - count[1] = 1; - stride[0] = (hsize_t)mpi_rank * block[0]; - stride[1] = 1; - start[0] = 0; - start[1] = (hsize_t)mpi_rank * block[1]; - } - - /* If the testname was not already set by the RESET case */ - if (selection_mode == TEST_ACTUAL_IO_RESET) - test_name = "RESET"; - else - test_name = "Multi Chunk - Mixed (Disagreement)"; - - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - if (mpi_size > 1) { - if (mpi_rank == 0) - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; - else - actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; - } - else - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - - break; - - /* Linked Chunk I/O */ - case TEST_ACTUAL_IO_LINK_CHUNK: - /* Nothing special; link chunk I/O is forced in the dxpl settings. */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Link Chunk"; - actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; - break; - - /* Contiguous Dataset */ - case TEST_ACTUAL_IO_CONTIGUOUS: - /* A non overlapping, regular selection in a contiguous dataset leads to - * collective I/O */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Contiguous"; - actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE; - break; - - case TEST_ACTUAL_IO_NO_COLLECTIVE: - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Independent"; - actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; - break; - - default: - test_name = "Undefined Selection Mode"; - actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; - break; - } - - ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* Create a memory dataspace mirroring the dataset and select the same hyperslab - * as in the file space. 
- */ - mem_space = H5Screate_simple(MAX_RANK, dims, NULL); - VRFY((mem_space >= 0), "mem_space created"); - - ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* Get the number of elements in the selection */ - length = dim0 * dim1; - - /* Allocate and initialize the buffer */ - buffer = (int *)malloc(sizeof(int) * (size_t)length); - VRFY((buffer != NULL), "malloc of buffer succeeded"); - for (i = 0; i < length; i++) - buffer[i] = i; - - /* Set up the dxpl for the write */ - dxpl_write = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - /* Set collective I/O properties in the dxpl. */ - if (is_collective) { - /* Request collective I/O */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* Set the threshold number of processes per chunk to twice mpi_size. - * This will prevent the threshold from ever being met, thus forcing - * multi chunk io instead of link chunk io. - * This is via default. - */ - if (multi_chunk_io) { - /* force multi-chunk-io by threshold */ - ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded"); - - /* set this to manipulate testing scenario about allocating processes - * to chunks */ - ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded"); - } - - /* Set directly go to multi-chunk-io without threshold calc. */ - if (direct_multi_chunk_io) { - /* set for multi chunk io by property*/ - ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - } - - /* Make a copy of the dxpl to test the read operation */ - dxpl_read = H5Pcopy(dxpl_write); - VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); - - /* Write */ - ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Retrieve Actual io values */ - ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); - VRFY((ret >= 0), "retrieving actual io mode succeeded"); - - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); - VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); - - /* Read */ - ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); - - /* Retrieve Actual io values */ - ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); - VRFY((ret >= 0), "retrieving actual io mode succeeded"); - - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); - VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); - - /* Check write vs read */ - VRFY((actual_io_mode_read == actual_io_mode_write), - "reading and writing are the same for actual_io_mode"); - VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write), - "reading and writing are the same for actual_chunk_opt_mode"); - - /* Test values */ - if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 && - actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) { - snprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for 
%s.\n", - test_name); - VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message); - snprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name); - VRFY((actual_io_mode_write == actual_io_mode_expected), message); - } - else { - fprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write, - actual_io_mode_write); - } - - /* To test that the property is successfully reset to the default, we perform some - * independent I/O after the collective I/O - */ - if (selection_mode == TEST_ACTUAL_IO_RESET) { - if (mpi_rank == 0) { - /* Switch to independent io */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* Write */ - ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Check Properties */ - ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); - VRFY((ret >= 0), "retrieving actual io mode succeeded"); - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); - VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); - - VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION, - "actual_chunk_opt_mode has correct value for reset write (independent)"); - VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE, - "actual_io_mode has correct value for reset write (independent)"); - - /* Read */ - ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer); - VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); - - /* Check Properties */ - ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); - VRFY((ret >= 0), "retrieving actual io mode succeeded"); - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); - VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); - - VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION, - "actual_chunk_opt_mode has correct value for reset read (independent)"); - VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE, - "actual_io_mode has correct value for reset read (independent)"); - } - } - - /* Release some resources */ - ret = H5Sclose(sid); - ret = H5Pclose(fapl_id); - ret = H5Pclose(dcpl); - ret = H5Pclose(dxpl_write); - ret = H5Pclose(dxpl_read); - ret = H5Dclose(dataset); - ret = H5Sclose(mem_space); - ret = H5Sclose(file_space); - ret = H5Fclose(fid); - free(buffer); - return; -} - -/* Function: actual_io_mode_tests - * - * Purpose: Tests all possible cases of the actual_io_mode property. - */ -void -actual_io_mode_tests(void) -{ - int mpi_size = -1; - int mpi_rank = -1; - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); - - /* - * Test multi-chunk-io via proc_num threshold - */ - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); - - /* The Multi Chunk Mixed test requires at least three processes.
*/ - if (mpi_size > 2) - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); - else - fprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n"); - - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); - - /* - * Test multi-chunk-io via setting direct property - */ - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); - - test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); - test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); - - test_actual_io_mode(TEST_ACTUAL_IO_RESET); - return; -} - -/* - * Function: test_no_collective_cause_mode - * - * Purpose: - * Tests cases of broken collective I/O and checks that the - * H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values. - * - * Input: - * selection_mode: various modes to cause broken collective I/O - * Note: Originally, each TEST case was supposed to be used alone. - * This was later updated to accept multiple TEST cases combined - * with '|'. However, there is no error check for combined test - * cases, so the tester is responsible for understanding and - * supplying a proper combination of TESTs if needed. - * - * - * TEST_COLLECTIVE: - * Test for regular collective I/O without cause of breaking. - * Just to test normal behavior. - * - * TEST_SET_INDEPENDENT: - * Test for Independent I/O as the cause of breaking collective I/O. - * - * TEST_DATATYPE_CONVERSION: - * Test for Data Type Conversion as the cause of breaking collective I/O. - * - * TEST_DATA_TRANSFORMS: - * Test for Data Transform feature as the cause of breaking collective I/O. - * - * TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES: - * Test for NULL dataspace as the cause of breaking collective I/O. - * - * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT: - * Test for Compact layout as the cause of breaking collective I/O. - * - * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL: - * Test for External-File storage as the cause of breaking collective I/O.
- */ -#define FILE_EXTERNAL "nocolcause_extern.data" -static void -test_no_collective_cause_mode(int selection_mode) -{ - uint32_t no_collective_cause_local_write = 0; - uint32_t no_collective_cause_local_read = 0; - uint32_t no_collective_cause_local_expected = 0; - uint32_t no_collective_cause_global_write = 0; - uint32_t no_collective_cause_global_read = 0; - uint32_t no_collective_cause_global_expected = 0; - // hsize_t coord[NELM][MAX_RANK]; - - uint32_t no_selection_io_cause_write = 0; - uint32_t no_selection_io_cause_read = 0; - uint32_t no_selection_io_cause_expected = 0; - - const char *filename; - const char *test_name; - bool is_chunked = 1; - bool is_independent = 0; - int mpi_size = -1; - int mpi_rank = -1; - int length; - int *buffer; - int i; - MPI_Comm mpi_comm; - MPI_Info mpi_info; - hid_t fid = H5I_INVALID_HID; - hid_t sid = H5I_INVALID_HID; - hid_t dataset = H5I_INVALID_HID; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl_id = H5I_INVALID_HID; - hid_t dcpl = H5I_INVALID_HID; - hid_t dxpl_write = H5I_INVALID_HID; - hid_t dxpl_read = H5I_INVALID_HID; - hsize_t dims[MAX_RANK]; - hid_t mem_space = H5I_INVALID_HID; - hid_t file_space = H5I_INVALID_HID; - hsize_t chunk_dims[MAX_RANK]; - herr_t ret; - /* set to global value as default */ - int l_facc_type = facc_type; - char message[256]; - - /* Set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - MPI_Barrier(test_comm); - - assert(mpi_size >= 1); - - mpi_comm = test_comm; - mpi_info = MPI_INFO_NULL; - - /* Create the dataset creation plist */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation plist created successfully"); - - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { - ret = H5Pset_layout(dcpl, H5D_COMPACT); - VRFY((ret >= 0), "set COMPACT layout succeeded"); - is_chunked = 0; - } - - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - ret = H5Pset_external(dcpl, FILE_EXTERNAL, 0, H5F_UNLIMITED); - VRFY((ret >= 0), "set EXTERNAL file layout succeeded"); - is_chunked = 0; - } - - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { - sid = H5Screate(H5S_NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - is_chunked = 0; - } - else { - /* Create the basic Space */ - /* if this is a compact dataset, create a small dataspace that does not exceed 64K */ - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { - dims[0] = BIG_X_FACTOR * 6; - dims[1] = BIG_Y_FACTOR * 6; - } - else { - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - } - sid = H5Screate_simple(MAX_RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - } - - filename = (const char *)GetTestParameters(); - assert(filename != NULL); - - /* Setup the file access template */ - fapl_id = create_faccess_plist(mpi_comm, mpi_info, l_facc_type); - VRFY((fapl_id >= 0), "create_faccess_plist() succeeded"); - - /* Create the file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* If we are not testing contiguous datasets */ - if (is_chunked) { - /* Set up chunk information. 
*/ - chunk_dims[0] = dims[0] / (hsize_t)mpi_size; - chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0), "chunk creation property list succeeded"); - } - - /* Create the dataset */ - dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); - - /* Set up the dxpl for the write */ - dxpl_write = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - /* - * Set expected causes and some tweaks based on the type of test - */ - if (selection_mode & TEST_DATATYPE_CONVERSION) { - test_name = "Broken Collective I/O - Datatype Conversion"; - - /* set different sign to trigger type conversion */ - data_type = H5T_NATIVE_UINT; - - /* Disable selection I/O since datatype conversion is supported in collective with selection I/O */ - ret = H5Pset_selection_io(dxpl_write, H5D_SELECTION_IO_MODE_OFF); - VRFY((ret >= 0), "H5Pset_selection_io succeeded"); - - no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION | H5D_MPIO_NO_SELECTION_IO; - no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION | H5D_MPIO_NO_SELECTION_IO; - no_selection_io_cause_expected |= H5D_SEL_IO_DISABLE_BY_API; - } - - if (selection_mode & TEST_DATA_TRANSFORMS) { - test_name = "Broken Collective I/O - DATA Transforms"; - - /* Set transform */ - ret = H5Pset_data_transform(dxpl_write, "x+1"); - VRFY((ret >= 0), "H5Pset_data_transform succeeded"); - - /* Disable selection I/O since data transforms are supported in collective with selection I/O */ - ret = H5Pset_selection_io(dxpl_write, H5D_SELECTION_IO_MODE_OFF); - VRFY((ret >= 0), "H5Pset_selection_io succeeded"); - - no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS | H5D_MPIO_NO_SELECTION_IO; - no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS | H5D_MPIO_NO_SELECTION_IO; - no_selection_io_cause_expected |= H5D_SEL_IO_DISABLE_BY_API; - } - - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { - test_name = "Broken Collective I/O - No Simple or Scalar DataSpace"; - no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; - no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; - no_collective_cause_local_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; - no_collective_cause_global_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; - } - - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT || - selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset"; - no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; - no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; - no_collective_cause_local_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; - no_collective_cause_global_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; - } - - if (selection_mode & TEST_COLLECTIVE) { - test_name = "Broken Collective I/O - Not Broken"; - no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE; - no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE; - } - - if (selection_mode & TEST_SET_INDEPENDENT) { - test_name = "Broken Collective I/O - Independent"; - no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT; - no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT; - no_collective_cause_local_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; - 
no_collective_cause_global_expected &= ~(unsigned)H5D_MPIO_NO_SELECTION_IO; - /* switch to independent io */ - is_independent = 1; - } - - /* use all spaces for certain tests */ - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES || - selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - file_space = H5S_ALL; - mem_space = H5S_ALL; - } - else { - /* Get the file dataspace */ - file_space = H5Dget_space(dataset); - VRFY((file_space >= 0), "H5Dget_space succeeded"); - - /* Create the memory dataspace */ - mem_space = H5Screate_simple(MAX_RANK, dims, NULL); - VRFY((mem_space >= 0), "mem_space created"); - } - - /* Get the number of elements in the selection */ - H5_CHECKED_ASSIGN(length, int, dims[0] * dims[1], hsize_t); - - /* Allocate and initialize the buffer */ - buffer = (int *)malloc(sizeof(int) * (size_t)length); - VRFY((buffer != NULL), "malloc of buffer succeeded"); - for (i = 0; i < length; i++) - buffer[i] = i; - - if (is_independent) { - /* Set Independent I/O */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - else { - /* Set Collective I/O */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - - /*--------------------- - * Test Write access - *---------------------*/ - - /* Write */ - ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Get the cause of broken collective I/O */ - ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write, - &no_collective_cause_global_write); - VRFY((ret >= 0), "retrieving no collective cause succeeded"); - - ret = H5Pget_no_selection_io_cause(dxpl_write, &no_selection_io_cause_write); - VRFY((ret >= 0), "retrieving no selection io cause succeeded"); - - if (no_collective_cause_local_write & H5D_MPIO_NO_SELECTION_IO) { - VRFY((no_selection_io_cause_write == no_selection_io_cause_expected), - "H5D_MPIO_NO_SELECTION_IO for write is as expected"); - } - - if (no_collective_cause_global_write & H5D_MPIO_NO_SELECTION_IO) { - - VRFY((no_selection_io_cause_write == no_selection_io_cause_expected), - "H5D_MPIO_NO_SELECTION_IO for write is as expected"); - } - - /*--------------------- - * Test Read access - *---------------------*/ - - /* Make a copy of the dxpl to test the read operation */ - dxpl_read = H5Pcopy(dxpl_write); - VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); - - /* Read */ - ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); - - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); - - /* Get the cause of broken collective I/O */ - ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read, - &no_collective_cause_global_read); - VRFY((ret >= 0), "retrieving no collective cause succeeded"); - - ret = H5Pget_no_selection_io_cause(dxpl_read, &no_selection_io_cause_read); - VRFY((ret >= 0), "retrieving no selection io cause succeeded"); - - if (no_collective_cause_local_read & H5D_MPIO_NO_SELECTION_IO) { - - VRFY((no_selection_io_cause_read == no_selection_io_cause_expected), - "H5D_MPIO_NO_SELECTION_IO for read is as expected"); - } - - if (no_collective_cause_global_read & H5D_MPIO_NO_SELECTION_IO) { - - VRFY((no_selection_io_cause_read == no_selection_io_cause_expected), - "H5D_MPIO_NO_SELECTION_IO for read is 
as expected"); - } - - /* Check write vs read */ - VRFY((no_collective_cause_local_read == no_collective_cause_local_write), - "reading and writing are the same for local cause of Broken Collective I/O"); - VRFY((no_collective_cause_global_read == no_collective_cause_global_write), - "reading and writing are the same for global cause of Broken Collective I/O"); - - /* Test values */ - memset(message, 0, sizeof(message)); - snprintf(message, sizeof(message), "Local cause of Broken Collective I/O has the correct value for %s.\n", - test_name); - VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message); - memset(message, 0, sizeof(message)); - snprintf(message, sizeof(message), - "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name); - VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message); - - /* Release some resources */ - if (sid) - H5Sclose(sid); - if (fapl_id) - H5Pclose(fapl_id); - if (dcpl) - H5Pclose(dcpl); - if (dxpl_write) - H5Pclose(dxpl_write); - if (dxpl_read) - H5Pclose(dxpl_read); - if (dataset) - H5Dclose(dataset); - if (mem_space) - H5Sclose(mem_space); - if (file_space) - H5Sclose(file_space); - if (fid) - H5Fclose(fid); - free(buffer); - - /* clean up external file */ - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) - HDremove(FILE_EXTERNAL); - - return; -} - -/* Function: no_collective_cause_tests - * - * Purpose: Tests cases for broken collective IO. - */ -void -no_collective_cause_tests(void) -{ - /* - * Test individual cause - */ - test_no_collective_cause_mode(TEST_COLLECTIVE); - test_no_collective_cause_mode(TEST_SET_INDEPENDENT); - test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION); - test_no_collective_cause_mode(TEST_DATA_TRANSFORMS); - test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES); - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT); - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL); - - /* - * Test combined causes - */ - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION); - test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | - TEST_DATA_TRANSFORMS); - - return; -} - -/* Function: dense_attr_test - * - * Purpose: Test cases for writing dense attributes in parallel - */ -void -test_dense_attr(void) -{ - int mpi_size, mpi_rank; - hid_t fpid, fid; - hid_t gid, gpid; - hid_t atFileSpace, atid; - hsize_t atDims[1] = {10000}; - herr_t status; - const char *filename; - - /* get filename */ - filename = (const char *)GetTestParameters(); - assert(filename != NULL); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - fpid = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fpid > 0), "H5Pcreate succeeded"); - status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - VRFY((status >= 0), "H5Pset_libver_bounds succeeded"); - status = H5Pset_fapl_mpio(fpid, test_comm, MPI_INFO_NULL); - VRFY((status >= 0), "H5Pset_fapl_mpio succeeded"); - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fpid); - VRFY((fid > 0), "H5Fcreate succeeded"); - status = H5Pclose(fpid); - VRFY((status >= 0), "H5Pclose succeeded"); - - gpid = H5Pcreate(H5P_GROUP_CREATE); - VRFY((gpid > 0), "H5Pcreate succeeded"); - status = 
H5Pset_attr_phase_change(gpid, 0, 0); - VRFY((status >= 0), "H5Pset_attr_phase_change succeeded"); - gid = H5Gcreate2(fid, "foo", H5P_DEFAULT, gpid, H5P_DEFAULT); - VRFY((gid > 0), "H5Gcreate2 succeeded"); - status = H5Pclose(gpid); - VRFY((status >= 0), "H5Pclose succeeded"); - - atFileSpace = H5Screate_simple(1, atDims, NULL); - VRFY((atFileSpace > 0), "H5Screate_simple succeeded"); - atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT); - VRFY((atid > 0), "H5Acreate succeeded"); - status = H5Sclose(atFileSpace); - VRFY((status >= 0), "H5Sclose succeeded"); - - status = H5Aclose(atid); - VRFY((status >= 0), "H5Aclose succeeded"); - - status = H5Gclose(gid); - VRFY((status >= 0), "H5Gclose succeeded"); - status = H5Fclose(fid); - VRFY((status >= 0), "H5Fclose succeeded"); - - return; -} - int main(int argc, char **argv) { - int express_test; - int mpi_size, mpi_rank; /* mpi variables */ - hsize_t oldsize, newsize = 1048576; + test_params_t test_params; + int express_test; + int mpi_size, mpi_rank; /* mpi variables */ + hsize_t oldsize, newsize = 1048576; #ifndef H5_HAVE_WIN32_API /* Un-buffer the stdout and stderr */ @@ -4278,8 +2492,6 @@ main(int argc, char **argv) MPI_Comm_size(test_comm, &mpi_size); MPI_Comm_rank(test_comm, &mpi_rank); - mpi_rank_framework_g = mpi_rank; - memset(filenames, 0, sizeof(filenames)); dim0 = BIG_X_FACTOR; @@ -4324,36 +2536,56 @@ main(int argc, char **argv) /* Initialize testing framework */ if (mpi_rank < 2) { - TestInit(argv[0], usage, parse_options); + if (TestInit(argv[0], usage, parse_options, NULL, NULL, mpi_rank) < 0) { + fprintf(stderr, "couldn't initialize testing framework\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } - /* Parse command line arguments */ - TestParseCmdLine(argc, argv); + test_params.filename = PARATESTFILE; - AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE); + AddTest("idsetw", dataset_writeInd, NULL, NULL, &test_params, sizeof(test_params), + "dataset independent write"); - AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE); + AddTest("idsetr", dataset_readInd, NULL, NULL, &test_params, sizeof(test_params), + "dataset independent read"); - AddTest("cdsetw", dataset_writeAll, NULL, "dataset collective write", PARATESTFILE); + AddTest("cdsetw", dataset_writeAll, NULL, NULL, &test_params, sizeof(test_params), + "dataset collective write"); - AddTest("cdsetr", dataset_readAll, NULL, "dataset collective read", PARATESTFILE); + AddTest("cdsetr", dataset_readAll, NULL, NULL, &test_params, sizeof(test_params), + "dataset collective read"); - AddTest("eidsetw2", extend_writeInd2, NULL, "extendible dataset independent write #2", PARATESTFILE); + AddTest("eidsetw2", extend_writeInd2, NULL, NULL, &test_params, sizeof(test_params), + "extendible dataset independent write #2"); - AddTest("selnone", none_selection_chunk, NULL, "chunked dataset with none-selection", PARATESTFILE); + AddTest("selnone", none_selection_chunk, NULL, NULL, &test_params, sizeof(test_params), + "chunked dataset with none-selection"); #ifdef H5_HAVE_FILTER_DEFLATE - AddTest("cmpdsetr", compress_readAll, NULL, "compressed dataset collective read", PARATESTFILE); + AddTest("cmpdsetr", compress_readAll, NULL, NULL, &test_params, sizeof(test_params), + "compressed dataset collective read"); #endif /* H5_HAVE_FILTER_DEFLATE */ /* Display testing information */ - TestInfo(argv[0]); + TestInfo(stdout); + + /* Parse command line arguments */ + if (TestParseCmdLine(argc, argv) < 0) { + 
fprintf(stderr, "couldn't parse command-line arguments\n"); + TestShutdown(); + MPI_Abort(MPI_COMM_WORLD, -1); + } /* setup file access property list */ fapl = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(fapl, test_comm, MPI_INFO_NULL); /* Perform requested testing */ - PerformTests(); + if (PerformTests() < 0) { + fprintf(stderr, "couldn't run tests\n"); + TestShutdown(); + MPI_Abort(MPI_COMM_WORLD, -1); + } } MPI_Barrier(MPI_COMM_WORLD); @@ -4376,6 +2608,12 @@ main(int argc, char **argv) filenames[i] = NULL; } + if (TestShutdown() < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't shut down testing framework\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + H5close(); if (test_comm != MPI_COMM_WORLD) { MPI_Comm_free(&test_comm); diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c index 068e3aa0f50..fe38dadb403 100644 --- a/testpar/t_bigio.c +++ b/testpar/t_bigio.c @@ -11,14 +11,16 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include "hdf5.h" -#include "testphdf5.h" +#include "testpar.h" #include "H5Dprivate.h" /* For Chunk tests */ +/* Include testing framework functionality */ +#include "testframe.h" + /* FILENAME and filenames must have the same number of names */ const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NULL}; /* Constants definitions */ -#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */ /* Define some handy debugging shorthands, routines, ... */ /* debugging tools */ @@ -28,22 +30,27 @@ const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NUL /* Constants definitions */ #define RANK 2 -#define IN_ORDER 1 -#define OUT_OF_ORDER 2 +#define DATASET1 "DSET1" +#define DATASET2 "DSET2" +#define DATASET3 "DSET3" +#define DATASET4 "DSET4" +#define DXFER_BIGCOUNT (1 << 29) + +#define SPACE_DIM1 24 +#define SPACE_DIM2 4 +#define BYROW_CONT 1 +#define BYROW_DISCONT 2 +#define BYROW_SELECTNONE 3 +#define BYROW_SELECTUNBALANCE 4 +#define BYROW_SELECTINCHUNK 5 -#define DATASET1 "DSET1" -#define DATASET2 "DSET2" -#define DATASET3 "DSET3" -#define DATASET4 "DSET4" -#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/ -#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */ -#define DXFER_BIGCOUNT (1 << 29) +#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name" #define HYPER 1 #define POINT 2 #define ALL 3 -/* Dataset data type. Int's can be easily octo dumped. */ +typedef int DATATYPE; typedef hsize_t B_DATATYPE; int facc_type = FACC_MPIO; /*Test file access type */ @@ -108,54 +115,6 @@ fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE *dataset) } } -/* - * Setup the coordinates for point selection. 
- */ -void -point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, - hsize_t coords[], int order) -{ - hsize_t i, j, k = 0, m, n, s1, s2; - - HDcompile_assert(RANK == 2); - - if (OUT_OF_ORDER == order) - k = (num_points * RANK) - 1; - else if (IN_ORDER == order) - k = 0; - - s1 = start[0]; - s2 = start[1]; - - for (i = 0; i < count[0]; i++) - for (j = 0; j < count[1]; j++) - for (m = 0; m < block[0]; m++) - for (n = 0; n < block[1]; n++) - if (OUT_OF_ORDER == order) { - coords[k--] = s2 + (stride[1] * j) + n; - coords[k--] = s1 + (stride[0] * i) + m; - } - else if (IN_ORDER == order) { - coords[k++] = s1 + stride[0] * i + m; - coords[k++] = s2 + stride[1] * j + n; - } - - if (VERBOSE_MED) { - printf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "total datapoints=%" PRIuHSIZE "\n", - start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1], - block[0] * block[1] * count[0] * count[1]); - k = 0; - for (i = 0; i < num_points; i++) { - printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); - k += 2; - } - } -} - /* * Print the content of the dataset. */ @@ -1186,59 +1145,6 @@ single_rank_independent_io(void) MPI_Barrier(MPI_COMM_WORLD); } -/* - * Create the appropriate File access property list - */ -hid_t -create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) -{ - hid_t ret_pl = H5I_INVALID_HID; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ - - /* need the rank for error checking macros */ - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS"); - - if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO) { - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY_G((ret >= 0), ""); - ret = H5Pset_all_coll_metadata_ops(ret_pl, true); - VRFY_G((ret >= 0), ""); - ret = H5Pset_coll_metadata_write(ret_pl, true); - VRFY_G((ret >= 0), ""); - return (ret_pl); - } - - if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { - hid_t mpio_pl; - - mpio_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY_G((mpio_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY_G((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY_G((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return (ret_pl); - } - - /* unknown file access types */ - return (ret_pl); -} - /*------------------------------------------------------------------------- * Function: coll_chunk1 * @@ -1269,7 +1175,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) * ------------------------------------------------------------------------ */ -void +static void coll_chunk1(void) { const char *filename = FILENAME[0]; @@ -1317,7 +1223,7 @@ coll_chunk1(void) * * ------------------------------------------------------------------------ */ -void +static void coll_chunk2(void) { const char *filename = FILENAME[0]; @@ -1366,7 +1272,7 @@ coll_chunk2(void) * ------------------------------------------------------------------------ */ -void +static void coll_chunk3(void) { const char *filename = FILENAME[0]; 
@@ -1922,7 +1828,12 @@ main(int argc, char **argv) printf("Failed to turn off atexit processing. Continue.\n"); /* set alarm. */ - TestAlarmOn(); + if (TestAlarmOn() < 0) { + if (MAIN_PROCESS) + fprintf(stderr, "couldn't enable test timer\n"); + MPI_Finalize(); + return -1; + } acc_plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); diff --git a/testpar/t_cache.c b/testpar/t_cache.c index 162b8f31679..5d0b9dc1a2f 100644 --- a/testpar/t_cache.c +++ b/testpar/t_cache.c @@ -779,7 +779,7 @@ init_data(void) * Function: do_express_test() * * Purpose: Do an MPI_Allreduce to obtain the maximum value returned - * by GetTestExpress() across all processes. Return this + * by h5_get_testexpress() across all processes. Return this * value. * * Envirmoment variables can be different across different @@ -787,7 +787,7 @@ init_data(void) * on whether to do an express test. * * Return: Success: Maximum of the values returned by - * GetTestExpress() across all processes. + * h5_get_testexpress() across all processes. * * Failure: -1 * @@ -799,7 +799,7 @@ do_express_test(void) int max_express_test; int result; - express_test = GetTestExpress(); + express_test = h5_get_testexpress(); result = MPI_Allreduce((void *)&express_test, (void *)&max_express_test, 1, MPI_INT, MPI_MAX, world_mpi_comm); @@ -1068,7 +1068,7 @@ setup_derived_types(void) int i; int result; MPI_Datatype mpi_types[9] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG, HADDR_AS_MPI_TYPE, - MPI_INT, MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED}; + MPI_INT, MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED}; int block_len[9] = {1, 1, 1, 1, 1, 1, 1, 1, 1}; MPI_Aint displs[9]; struct mssg_t sample; /* used to compute displacements */ @@ -6037,57 +6037,57 @@ trace_file_check(int metadata_write_strategy) const char *((*expected_output)[]) = NULL; const char *expected_output_0[] = {"### HDF5 metadata cache trace file version 1 ###\n", - "H5AC_set_cache_auto_resize_config", - "H5AC_insert_entry", - "H5AC_insert_entry", - "H5AC_insert_entry", - "H5AC_insert_entry", - "H5AC_protect", - "H5AC_mark_entry_dirty", - "H5AC_unprotect", - "H5AC_protect", - "H5AC_pin_protected_entry", - "H5AC_unprotect", - "H5AC_unpin_entry", - "H5AC_expunge_entry", - "H5AC_protect", - "H5AC_pin_protected_entry", - "H5AC_unprotect", - "H5AC_mark_entry_dirty", - "H5AC_resize_entry", - "H5AC_resize_entry", - "H5AC_unpin_entry", - "H5AC_move_entry", - "H5AC_move_entry", - "H5AC_flush", - "H5AC_flush", - NULL}; + "H5AC_set_cache_auto_resize_config", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_protect", + "H5AC_mark_entry_dirty", + "H5AC_unprotect", + "H5AC_protect", + "H5AC_pin_protected_entry", + "H5AC_unprotect", + "H5AC_unpin_entry", + "H5AC_expunge_entry", + "H5AC_protect", + "H5AC_pin_protected_entry", + "H5AC_unprotect", + "H5AC_mark_entry_dirty", + "H5AC_resize_entry", + "H5AC_resize_entry", + "H5AC_unpin_entry", + "H5AC_move_entry", + "H5AC_move_entry", + "H5AC_flush", + "H5AC_flush", + NULL}; const char *expected_output_1[] = {"### HDF5 metadata cache trace file version 1 ###\n", - "H5AC_set_cache_auto_resize_config", - "H5AC_insert_entry", - "H5AC_insert_entry", - "H5AC_insert_entry", - "H5AC_insert_entry", - "H5AC_protect", - "H5AC_mark_entry_dirty", - "H5AC_unprotect", - "H5AC_protect", - "H5AC_pin_protected_entry", - "H5AC_unprotect", - "H5AC_unpin_entry", - "H5AC_expunge_entry", - "H5AC_protect", - "H5AC_pin_protected_entry", - "H5AC_unprotect", - "H5AC_mark_entry_dirty", - "H5AC_resize_entry", - "H5AC_resize_entry", - 
"H5AC_unpin_entry", - "H5AC_move_entry", - "H5AC_move_entry", - "H5AC_flush", - "H5AC_flush", - NULL}; + "H5AC_set_cache_auto_resize_config", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_protect", + "H5AC_mark_entry_dirty", + "H5AC_unprotect", + "H5AC_protect", + "H5AC_pin_protected_entry", + "H5AC_unprotect", + "H5AC_unpin_entry", + "H5AC_expunge_entry", + "H5AC_protect", + "H5AC_pin_protected_entry", + "H5AC_unprotect", + "H5AC_mark_entry_dirty", + "H5AC_resize_entry", + "H5AC_resize_entry", + "H5AC_unpin_entry", + "H5AC_move_entry", + "H5AC_move_entry", + "H5AC_flush", + "H5AC_flush", + NULL}; char buffer[256]; char trace_file_name[64]; bool done = false; diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c index 5de615038b2..88c070968fd 100644 --- a/testpar/t_cache_image.c +++ b/testpar/t_cache_image.c @@ -15,7 +15,7 @@ * feature implemented in H5C.c */ -#include "testphdf5.h" +#include "testpar.h" #include "cache_common.h" #include "genall5.h" diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c index 1d5978306a8..202551ff9c8 100644 --- a/testpar/t_chunk_alloc.c +++ b/testpar/t_chunk_alloc.c @@ -456,7 +456,7 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in * it, read to verify all data are as written. */ void -test_chunk_alloc(void) +test_chunk_alloc(const void *params) { const char *filename; hid_t file_id, dataset; @@ -480,7 +480,7 @@ test_chunk_alloc(void) return; } - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Extend Chunked allocation test on file %s\n", filename); @@ -542,7 +542,7 @@ test_chunk_alloc(void) * fashion. */ void -test_chunk_alloc_incr_ser_to_par(void) +test_chunk_alloc_incr_ser_to_par(const void *params) { H5D_space_status_t space_status; const char *filename; @@ -567,7 +567,7 @@ test_chunk_alloc_incr_ser_to_par(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (MAINPROCESS && VERBOSE_MED) printf("Chunked dataset incremental file space allocation serial to parallel test on file %s\n", filename); diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c index fa3459d252f..eeaced0a067 100644 --- a/testpar/t_coll_chunk.c +++ b/testpar/t_coll_chunk.c @@ -64,9 +64,9 @@ static void coll_chunktest(const char *filename, int chunk_factor, int select_fa */ void -coll_chunk1(void) +coll_chunk1(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t *)params)->name; int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -126,9 +126,9 @@ coll_chunk1(void) * ------------------------------------------------------------------------ */ void -coll_chunk2(void) +coll_chunk2(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t *)params)->name; int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -189,9 +189,9 @@ coll_chunk2(void) */ void -coll_chunk3(void) +coll_chunk3(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t *)params)->name; int mpi_size; int mpi_rank; @@ -254,9 +254,9 @@ coll_chunk3(void) */ void -coll_chunk4(void) +coll_chunk4(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t 
*)params)->name; int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -317,9 +317,9 @@ coll_chunk4(void) */ void -coll_chunk5(void) +coll_chunk5(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t *)params)->name; int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -382,9 +382,9 @@ coll_chunk5(void) */ void -coll_chunk6(void) +coll_chunk6(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t *)params)->name; int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -445,9 +445,9 @@ coll_chunk6(void) */ void -coll_chunk7(void) +coll_chunk7(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t *)params)->name; int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -508,9 +508,9 @@ coll_chunk7(void) */ void -coll_chunk8(void) +coll_chunk8(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t *)params)->name; int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -571,9 +571,9 @@ coll_chunk8(void) */ void -coll_chunk9(void) +coll_chunk9(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t *)params)->name; int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -634,9 +634,9 @@ coll_chunk9(void) */ void -coll_chunk10(void) +coll_chunk10(const void *params) { - const char *filename = GetTestParameters(); + const char *filename = ((const H5Ptest_param_t *)params)->name; int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -739,6 +739,22 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap VRFY((coords != NULL), "coords malloc succeeded"); point_set(start, count, stride, block, num_points, coords, mode); + if (VERBOSE_MED) { + hsize_t k = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t i = 0; i < num_points; i++) { + printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += RANK; + } + } + file_dataspace = H5Screate_simple(2, dims, NULL); VRFY((file_dataspace >= 0), "file dataspace created succeeded"); diff --git a/testpar/t_coll_md.c b/testpar/t_coll_md.c index 043ecf81208..078a561279e 100644 --- a/testpar/t_coll_md.c +++ b/testpar/t_coll_md.c @@ -63,14 +63,14 @@ * arbitrary number (0 was chosen). 
*/ void -test_partial_no_selection_coll_md_read(void) +test_partial_no_selection_coll_md_read(const void *params) { const char *filename; hsize_t *dataset_dims = NULL; hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS]; hsize_t sel_dims[1]; hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = {PARTIAL_NO_SELECTION_Y_DIM_SCALE, - PARTIAL_NO_SELECTION_X_DIM_SCALE}; + PARTIAL_NO_SELECTION_X_DIM_SCALE}; hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS]; hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS]; hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS]; @@ -102,7 +102,7 @@ test_partial_no_selection_coll_md_read(void) return; } - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); @@ -262,7 +262,7 @@ test_partial_no_selection_coll_md_read(void) * */ void -test_multi_chunk_io_addrmap_issue(void) +test_multi_chunk_io_addrmap_issue(const void *params) { const char *filename; hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; @@ -297,7 +297,7 @@ test_multi_chunk_io_addrmap_issue(void) return; } - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); @@ -390,7 +390,7 @@ test_multi_chunk_io_addrmap_issue(void) *2096 but expected 320000 major: Internal error (too specific to document in detail) minor: MPI Error String */ void -test_link_chunk_io_sort_chunk_issue(void) +test_link_chunk_io_sort_chunk_issue(const void *params) { const char *filename; hsize_t dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; @@ -427,7 +427,7 @@ test_link_chunk_io_sort_chunk_issue(void) return; } - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); @@ -554,7 +554,7 @@ test_link_chunk_io_sort_chunk_issue(void) * heap data is not correctly mapped as raw data. */ void -test_collective_global_heap_write(void) +test_collective_global_heap_write(const void *params) { const char *filename; hsize_t attr_dims[COLL_GHEAP_WRITE_ATTR_DIMS]; @@ -583,7 +583,7 @@ test_collective_global_heap_write(void) return; } - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); @@ -634,7 +634,7 @@ test_collective_global_heap_write(void) * collective metadata writes are NOT requested. */ void -test_coll_io_ind_md_write(void) +test_coll_io_ind_md_write(const void *params) { const char *filename; long long *data = NULL; @@ -654,7 +654,7 @@ test_coll_io_ind_md_write(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); diff --git a/testpar/t_dset.c b/testpar/t_dset.c index bf4fcfda1a7..cf4ada8139d 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -122,53 +122,6 @@ slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t s } } -/* - * Setup the coordinates for point selection. 
- */ -void -point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, - hsize_t coords[], int order) -{ - hsize_t i, j, k = 0, m, n, s1, s2; - - HDcompile_assert(RANK == 2); - - if (OUT_OF_ORDER == order) - k = (num_points * RANK) - 1; - else if (IN_ORDER == order) - k = 0; - - s1 = start[0]; - s2 = start[1]; - - for (i = 0; i < count[0]; i++) - for (j = 0; j < count[1]; j++) - for (m = 0; m < block[0]; m++) - for (n = 0; n < block[1]; n++) - if (OUT_OF_ORDER == order) { - coords[k--] = s2 + (stride[1] * j) + n; - coords[k--] = s1 + (stride[0] * i) + m; - } - else if (IN_ORDER == order) { - coords[k++] = s1 + stride[0] * i + m; - coords[k++] = s2 + stride[1] * j + n; - } - - if (VERBOSE_MED) { - printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " - "datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], - (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], - (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0] * block[1] * count[0] * count[1])); - k = 0; - for (i = 0; i < num_points; i++) { - printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); - k += 2; - } - } -} - /* * Fill the dataset with trivial data for testing. * Assume dimension rank is 2 and data is stored contiguous. @@ -271,7 +224,7 @@ dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[] */ void -dataset_writeInd(void) +dataset_writeInd(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -293,7 +246,7 @@ dataset_writeInd(void) MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Independent write test on file %s\n", filename); @@ -423,7 +376,7 @@ dataset_writeInd(void) /* Example of using the parallel HDF5 library to read a dataset */ void -dataset_readInd(void) +dataset_readInd(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -444,7 +397,7 @@ dataset_readInd(void) MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Independent read test on file %s\n", filename); @@ -558,7 +511,7 @@ dataset_readInd(void) */ void -dataset_writeAll(void) +dataset_writeAll(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -587,7 +540,7 @@ dataset_writeAll(void) MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Collective write test on file %s\n", filename); @@ -948,6 +901,22 @@ dataset_writeAll(void) /* Dataset5: point selection in File - Hyperslab selection in Memory*/ /* create a file dataspace independently */ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + if (VERBOSE_MED) { + hsize_t k = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t i = 0; i < 
num_points; i++) { + printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += RANK; + } + } + file_dataspace = H5Dget_space(dataset5); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); @@ -984,6 +953,22 @@ dataset_writeAll(void) start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); start[1] = 0; point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + if (VERBOSE_MED) { + hsize_t k = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t i = 0; i < num_points; i++) { + printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += RANK; + } + } + file_dataspace = H5Dget_space(dataset6); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); @@ -992,6 +977,22 @@ dataset_writeAll(void) start[0] = 0; start[1] = 0; point_set(start, count, stride, block, num_points, coords, IN_ORDER); + if (VERBOSE_MED) { + hsize_t k = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t i = 0; i < num_points; i++) { + printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += RANK; + } + } + mem_dataspace = H5Dget_space(dataset6); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1021,6 +1022,22 @@ dataset_writeAll(void) start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); start[1] = 0; point_set(start, count, stride, block, num_points, coords, IN_ORDER); + if (VERBOSE_MED) { + hsize_t k = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t i = 0; i < num_points; i++) { + printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + k += RANK; + } + } + file_dataspace = H5Dget_space(dataset7); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1090,7 +1107,7 @@ dataset_writeAll(void) */ void -dataset_readAll(void) +dataset_readAll(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -1116,7 +1133,7 @@ dataset_readAll(void) MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Collective read test on file %s\n", filename); @@ -1353,6 +1370,22 @@ dataset_readAll(void) start[0] = 0; start[1] = 0; point_set(start, count, stride, block, 
num_points, coords, OUT_OF_ORDER); + if (VERBOSE_MED) { + hsize_t idx = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t point = 0; point < num_points; point++) { + printf("(%d, %d)\n", (int)coords[idx], (int)coords[idx + 1]); + idx += RANK; + } + } + mem_dataspace = H5Dget_space(dataset5); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1391,6 +1424,22 @@ dataset_readAll(void) start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); start[1] = 0; point_set(start, count, stride, block, num_points, coords, IN_ORDER); + if (VERBOSE_MED) { + hsize_t idx = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t point = 0; point < num_points; point++) { + printf("(%d, %d)\n", (int)coords[idx], (int)coords[idx + 1]); + idx += RANK; + } + } + file_dataspace = H5Dget_space(dataset6); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1399,6 +1448,22 @@ dataset_readAll(void) start[0] = 0; start[1] = 0; point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + if (VERBOSE_MED) { + hsize_t idx = 0; + + printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); + + for (size_t point = 0; point < num_points; point++) { + printf("(%d, %d)\n", (int)coords[idx], (int)coords[idx + 1]); + idx += RANK; + } + } + mem_dataspace = H5Dget_space(dataset6); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); @@ -1517,7 +1582,7 @@ dataset_readAll(void) */ void -extend_writeInd(void) +extend_writeInd(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -1543,7 +1608,7 @@ extend_writeInd(void) MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Extend independent write test on file %s\n", filename); @@ -1751,7 +1816,7 @@ extend_writeInd(void) */ void -extend_writeInd2(void) +extend_writeInd2(const void *params) { const char *filename; hid_t fid; /* HDF5 file ID */ @@ -1771,7 +1836,7 @@ extend_writeInd2(void) int i; /* Local index variable */ herr_t ret; /* Generic return value */ - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Extend independent write test #2 on file %s\n", filename); @@ -1924,7 
+1989,7 @@ extend_writeInd2(void) /* Example of using the parallel HDF5 library to read an extendible dataset */ void -extend_readInd(void) +extend_readInd(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -1947,7 +2012,7 @@ extend_readInd(void) MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Extend independent read test on file %s\n", filename); @@ -2114,7 +2179,7 @@ extend_readInd(void) */ void -extend_writeAll(void) +extend_writeAll(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -2141,7 +2206,7 @@ extend_writeAll(void) MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Extend independent write test on file %s\n", filename); @@ -2367,7 +2432,7 @@ extend_writeAll(void) /* Example of using the parallel HDF5 library to read an extendible dataset */ void -extend_readAll(void) +extend_readAll(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -2391,7 +2456,7 @@ extend_readAll(void) MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Extend independent read test on file %s\n", filename); @@ -2573,7 +2638,7 @@ extend_readAll(void) */ #ifdef H5_HAVE_FILTER_DEFLATE void -compress_readAll(void) +compress_readAll(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -2594,7 +2659,7 @@ compress_readAll(void) int mpi_size, mpi_rank; herr_t ret; /* Generic return value */ - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Collective chunked dataset read test on file %s\n", filename); @@ -2769,7 +2834,7 @@ compress_readAll(void) */ void -none_selection_chunk(void) +none_selection_chunk(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -2797,7 +2862,7 @@ none_selection_chunk(void) MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Extend independent write test on file %s\n", filename); @@ -3031,7 +3096,7 @@ none_selection_chunk(void) * path way to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold. 
*/ static void -test_actual_io_mode(int selection_mode) +test_actual_io_mode(const void *params, int selection_mode) { H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION; H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION; @@ -3111,7 +3176,7 @@ test_actual_io_mode(int selection_mode) mpi_comm = MPI_COMM_WORLD; mpi_info = MPI_INFO_NULL; - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; assert(filename != NULL); /* Setup the file access template */ @@ -3484,7 +3549,7 @@ test_actual_io_mode(int selection_mode) * */ void -actual_io_mode_tests(void) +actual_io_mode_tests(const void *params) { H5D_selection_io_mode_t selection_io_mode; hid_t dxpl_id = H5I_INVALID_HID; @@ -3507,32 +3572,32 @@ actual_io_mode_tests(void) VRFY((ret >= 0), "H5Pclose succeeded"); if (selection_io_mode == H5D_SELECTION_IO_MODE_OFF) { - test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); + test_actual_io_mode(params, TEST_ACTUAL_IO_NO_COLLECTIVE); /* * Test multi-chunk-io via proc_num threshold */ - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); + test_actual_io_mode(params, TEST_ACTUAL_IO_MULTI_CHUNK_IND); + test_actual_io_mode(params, TEST_ACTUAL_IO_MULTI_CHUNK_COL); /* The Multi Chunk Mixed test requires at least three processes. */ if (mpi_size > 2) - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); + test_actual_io_mode(params, TEST_ACTUAL_IO_MULTI_CHUNK_MIX); else fprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n"); - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); + test_actual_io_mode(params, TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); /* * Test multi-chunk-io via setting direct property */ - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); + test_actual_io_mode(params, TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); + test_actual_io_mode(params, TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); - test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); - test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); + test_actual_io_mode(params, TEST_ACTUAL_IO_LINK_CHUNK); + test_actual_io_mode(params, TEST_ACTUAL_IO_CONTIGUOUS); - test_actual_io_mode(TEST_ACTUAL_IO_RESET); + test_actual_io_mode(params, TEST_ACTUAL_IO_RESET); } return; @@ -3579,7 +3644,7 @@ actual_io_mode_tests(void) */ #define FILE_EXTERNAL "nocolcause_extern.data" static void -test_no_collective_cause_mode(int selection_mode) +test_no_collective_cause_mode(const void *params, int selection_mode) { uint32_t no_collective_cause_local_write = 0; uint32_t no_collective_cause_local_read = 0; @@ -3682,7 +3747,7 @@ test_no_collective_cause_mode(int selection_mode) VRFY((sid >= 0), "H5Screate_simple succeeded"); } - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; assert(filename != NULL); /* Setup the file access template */ @@ -3920,7 +3985,7 @@ test_no_collective_cause_mode(int selection_mode) /* clean up external file */ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) - H5Fdelete(FILE_EXTERNAL, fapl); + HDremove(FILE_EXTERNAL); if (fapl) H5Pclose(fapl); @@ -3934,26 +3999,27 @@ test_no_collective_cause_mode(int selection_mode) * */ void -no_collective_cause_tests(void) +no_collective_cause_tests(const void *params) { /* * Test individual cause */ - test_no_collective_cause_mode(TEST_COLLECTIVE); - 
test_no_collective_cause_mode(TEST_SET_INDEPENDENT); - test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION); - test_no_collective_cause_mode(TEST_DATA_TRANSFORMS); - test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES); - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT); - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL); + test_no_collective_cause_mode(params, TEST_COLLECTIVE); + test_no_collective_cause_mode(params, TEST_SET_INDEPENDENT); + test_no_collective_cause_mode(params, TEST_DATATYPE_CONVERSION); + test_no_collective_cause_mode(params, TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(params, TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES); + test_no_collective_cause_mode(params, TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT); + test_no_collective_cause_mode(params, TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL); /* * Test combined causes */ - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION); - test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | - TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(params, + TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION); + test_no_collective_cause_mode(params, TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(params, TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | + TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); return; } @@ -3970,7 +4036,7 @@ no_collective_cause_tests(void) */ void -dataset_atomicity(void) +dataset_atomicity(const void *params) { hid_t fid; /* HDF5 file ID */ hid_t acc_tpl; /* File access templates */ @@ -3997,7 +4063,7 @@ dataset_atomicity(void) dim0 = 64; dim1 = 32; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (facc_type != FACC_MPIO) { printf("Atomicity tests will not work without the MPIO VFD\n"); return; @@ -4318,7 +4384,7 @@ dataset_atomicity(void) * */ void -test_dense_attr(void) +test_dense_attr(const void *params) { int mpi_size, mpi_rank; hid_t fpid, fid; @@ -4346,7 +4412,7 @@ test_dense_attr(void) } /* get filename */ - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; assert(filename != NULL); fpid = H5Pcreate(H5P_FILE_ACCESS); diff --git a/testpar/t_file.c b/testpar/t_file.c index acfb45d30a8..1fd4b41e82f 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -57,7 +57,7 @@ static int open_file(const char *filename, hid_t fapl, int metadata_write_strate * sooner or later due to barrier mixed up. 
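Note: as the actual_io_mode and no_collective_cause hunks above show, driver tests that fan out into mode-specific sub-cases now simply thread the framework's parameter block through. A sketch of that shape, with hypothetical names and the mode values reduced to placeholders:

/* Hypothetical mode-parameterized sub-test: the driver forwards the opaque
 * params pointer untouched and only the sub-test casts it. */
static void
example_io_mode_case(const void *params, int selection_mode)
{
    const char *filename = ((const H5Ptest_param_t *)params)->name;

    if (VERBOSE_MED)
        printf("io mode case %d on file %s\n", selection_mode, filename);

    /* ... set up the dxpl for this mode and verify the I/O behavior ... */
}

/* The driver registered with the framework never inspects params itself;
 * it only decides which cases to run. */
void
example_io_mode_cases(const void *params)
{
    example_io_mode_case(params, 0 /* e.g. independent */);
    example_io_mode_case(params, 1 /* e.g. collective  */);
}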
*/ void -test_split_comm_access(void) +test_split_comm_access(const void *params) { MPI_Comm comm; MPI_Info info = MPI_INFO_NULL; @@ -68,7 +68,7 @@ test_split_comm_access(void) herr_t ret; /* generic return value */ const char *filename; - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Split Communicator access test on file %s\n", filename); @@ -134,7 +134,7 @@ test_split_comm_access(void) } void -test_page_buffer_access(void) +test_page_buffer_access(const void *params) { const char *filename; hid_t file_id = H5I_INVALID_HID; /* File ID */ @@ -152,7 +152,7 @@ test_page_buffer_access(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; /* Until page buffering is supported in parallel in some form (even if * just for a single MPI process), this test just will just check to @@ -790,7 +790,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t * multiple opens of the same file. */ void -test_file_properties(void) +test_file_properties(const void *params) { hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ @@ -823,7 +823,7 @@ test_file_properties(void) return; } - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; mpi_ret = MPI_Info_create(&info); VRFY((mpi_ret >= 0), "MPI_Info_create succeeded"); @@ -995,7 +995,7 @@ test_file_properties(void) } /* end test_file_properties() */ void -test_delete(void) +test_delete(const void *params) { hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ @@ -1005,7 +1005,7 @@ test_delete(void) htri_t is_accessible = FAIL; /* Whether a file is accessible */ herr_t ret; /* Generic return value */ - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); @@ -1075,7 +1075,7 @@ test_delete(void) * due to an invalid library version bounds setting */ void -test_invalid_libver_bounds_file_close_assert(void) +test_invalid_libver_bounds_file_close_assert(const void *params) { const char *filename = NULL; MPI_Comm comm = MPI_COMM_WORLD; @@ -1085,7 +1085,7 @@ test_invalid_libver_bounds_file_close_assert(void) hid_t fapl_id = H5I_INVALID_HID; hid_t fcpl_id = H5I_INVALID_HID; - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); @@ -1125,7 +1125,7 @@ test_invalid_libver_bounds_file_close_assert(void) * called by multiple ranks. */ void -test_evict_on_close_parallel_unsupp(void) +test_evict_on_close_parallel_unsupp(const void *params) { const char *filename = NULL; MPI_Comm comm = MPI_COMM_WORLD; @@ -1134,7 +1134,7 @@ test_evict_on_close_parallel_unsupp(void) hid_t fapl_id = H5I_INVALID_HID; herr_t ret; - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); @@ -1185,7 +1185,7 @@ test_evict_on_close_parallel_unsupp(void) * This is a test program from the user. 
*/ void -test_fapl_preserve_hints(void) +test_fapl_preserve_hints(const void *params) { const char *filename; const char *key = "hdf_info_fapl"; @@ -1203,7 +1203,7 @@ test_fapl_preserve_hints(void) int mpi_ret; /* MPI return value */ herr_t ret; /* Generic return value */ - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; value_used = malloc(MPI_MAX_INFO_VAL + 1); VRFY(value_used, "malloc succeeded"); diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c index 1790685cfe0..ae1658e47d3 100644 --- a/testpar/t_file_image.c +++ b/testpar/t_file_image.c @@ -58,7 +58,7 @@ * JRM -- 11/28/11 */ void -file_image_daisy_chain_test(void) +file_image_daisy_chain_test(const void H5_ATTR_UNUSED *params) { char file_name[1024] = "\0"; int mpi_size, mpi_rank; diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c index c00d139d08e..e7cd95de672 100644 --- a/testpar/t_filter_read.c +++ b/testpar/t_filter_read.c @@ -192,7 +192,7 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) */ void -test_filter_read(void) +test_filter_read(const void *params) { hid_t dc; /* HDF5 IDs */ const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */ @@ -220,7 +220,7 @@ test_filter_read(void) hsize_t combo_size; /* Size of dataset with multiple filters */ #endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */ - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; if (VERBOSE_MED) printf("Parallel reading of dataset written with filters %s\n", filename); diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index fad597e17da..69f4f12551b 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -9989,12 +9989,17 @@ main(int argc, char **argv) if (VERBOSE_MED) h5_show_hostname(); - TestAlarmOn(); + if (TestAlarmOn() < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't enable test timer\n"); + fflush(stderr); + MPI_Abort(MPI_COMM_WORLD, -1); + } /* * Get the TestExpress level setting */ - test_express_level_g = GetTestExpress(); + test_express_level_g = h5_get_testexpress(); if ((test_express_level_g >= 1) && MAINPROCESS) { printf("** Some tests will be skipped due to TestExpress setting.\n"); printf("** Exhaustive tests will only be performed for the first available filter.\n"); diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h index 04d36395dbc..f853b779e89 100644 --- a/testpar/t_filters_parallel.h +++ b/testpar/t_filters_parallel.h @@ -24,6 +24,9 @@ #include "stdlib.h" #include "testpar.h" +/* Include testing framework functionality */ +#include "testframe.h" + #define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0]) /* Used to load other filters than GZIP */ diff --git a/testpar/t_init_term.c b/testpar/t_init_term.c index 0268e3d9eca..ec12396bc05 100644 --- a/testpar/t_init_term.c +++ b/testpar/t_init_term.c @@ -15,7 +15,7 @@ * termination of the HDF5 library with MPI init and finalize. 
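Note: in the t_filters_parallel.c hunk above, TestAlarmOn() is treated as fallible and the whole MPI job is aborted if the watchdog timer cannot be installed, and the express level now comes from h5_get_testexpress(). A condensed sketch of the hardened startup, wrapped in a hypothetical helper; it assumes testframe.h declares TestAlarmOn() as the hunk suggests.

#include <stdio.h>
#include <mpi.h>

#include "testframe.h" /* TestAlarmOn() and related framework calls */

/* Hypothetical helper mirroring the startup pattern in these hunks: if the
 * watchdog timer cannot be installed, abort the MPI job rather than letting
 * the tests run untimed. */
static void
example_enable_test_timer(int mpi_rank)
{
    if (TestAlarmOn() < 0) {
        if (mpi_rank == 0)
            fprintf(stderr, "couldn't enable test timer\n");
        fflush(stderr);
        MPI_Abort(MPI_COMM_WORLD, -1);
    }
}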
*/ -#include "testphdf5.h" +#include "testpar.h" int nerrors = 0; /* errors count */ diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c index b9cb4cc5729..ae7ecde133e 100644 --- a/testpar/t_mdset.c +++ b/testpar/t_mdset.c @@ -31,6 +31,9 @@ static int read_attribute(hid_t, int, int); static int check_value(DATATYPE *, DATATYPE *, int); static void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int); +static void rr_obj_hdr_flush_confusion_writer(const void *params, MPI_Comm comm); +static void rr_obj_hdr_flush_confusion_reader(const void *params, MPI_Comm comm); + /* * The size value computed by this function is used extensively in * configuring tests for the current number of processes. @@ -72,7 +75,7 @@ get_size(void) * */ void -zero_dim_dset(void) +zero_dim_dset(const void *params) { int mpi_size, mpi_rank; const char *filename; @@ -95,7 +98,7 @@ zero_dim_dset(void) return; } - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); VRFY((plist >= 0), "create_faccess_plist succeeded"); @@ -141,7 +144,7 @@ zero_dim_dset(void) * a slab of array to the file. */ void -multiple_dset_write(void) +multiple_dset_write(const void *params) { int i, j, n, mpi_size, mpi_rank, size; hid_t iof, plist, dataset, memspace, filespace; @@ -157,7 +160,7 @@ multiple_dset_write(void) char *filename; int ndatasets; - pt = GetTestParameters(); + pt = params; filename = pt->name; ndatasets = pt->count; @@ -235,7 +238,7 @@ multiple_dset_write(void) /* Example of using PHDF5 to create, write, and read compact dataset. */ void -compact_dataset(void) +compact_dataset(const void *params) { int i, j, mpi_size, mpi_rank, size, err_num = 0; hid_t iof, plist, dcpl, dxpl, dataset, filespace; @@ -274,7 +277,7 @@ compact_dataset(void) inme = malloc((size_t)size * (size_t)size * sizeof(double)); VRFY((outme != NULL), "malloc succeeded for inme"); - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; VRFY((mpi_size <= size), "mpi_size <= size"); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); @@ -375,7 +378,7 @@ compact_dataset(void) * of Null dataspace. */ void -null_dataset(void) +null_dataset(const void *params) { int mpi_size, mpi_rank; hid_t iof, plist, dxpl, dataset, attr, sid; @@ -403,7 +406,7 @@ null_dataset(void) return; } - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); @@ -492,7 +495,7 @@ null_dataset(void) * the boundary of interest. */ void -big_dataset(void) +big_dataset(const void *params) { int mpi_size, mpi_rank; /* MPI info */ hid_t iof, /* File ID */ @@ -523,7 +526,7 @@ big_dataset(void) /* Verify MPI_Offset can handle larger than 2GB sizes */ VRFY((sizeof(MPI_Offset) > 4), "sizeof(MPI_Offset)>4"); - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); VRFY((fapl >= 0), "create_faccess_plist succeeded"); @@ -634,7 +637,7 @@ big_dataset(void) * default fill value of zeros to work correctly. 
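Note: the t_mdset.c hunks above show the second flavor of the parameter block. Tests such as multiple_dset_write read both the file name and a count (number of datasets or groups) from the same H5Ptest_param_t. A sketch with a hypothetical test name:

/* Hypothetical multi-object test: the parameter block carries both the
 * target file name and a count, filled in by main() at registration time. */
void
example_multi_object_test(const void *params)
{
    const H5Ptest_param_t *pt       = params;
    char                  *filename = pt->name;
    int                    nobjects = pt->count;

    if (VERBOSE_MED)
        printf("creating %d objects in %s\n", nobjects, filename);

    /* ... create 'nobjects' datasets or groups in 'filename' collectively ... */
}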
*/ void -dataset_fillvalue(void) +dataset_fillvalue(const void *params) { int mpi_size, mpi_rank; /* MPI info */ int err_num; /* Number of errors */ @@ -672,7 +675,7 @@ dataset_fillvalue(void) return; } - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; /* Set the dataset dimension to be one row more than number of processes */ /* and calculate the actual dataset size. */ @@ -888,17 +891,17 @@ dataset_fillvalue(void) /* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */ void -collective_group_write_independent_group_read(void) +collective_group_write_independent_group_read(const void *params) { - collective_group_write(); - independent_group_read(); + collective_group_write(params); + independent_group_read(params); } /* Write multiple groups with a chunked dataset in each group collectively. * These groups and datasets are for testing independent read later. */ void -collective_group_write(void) +collective_group_write(const void *params) { int mpi_rank, mpi_size, size; int i, j, m; @@ -913,7 +916,7 @@ collective_group_write(void) char *filename; int ngroups; - pt = GetTestParameters(); + pt = params; filename = pt->name; ngroups = pt->count; @@ -1011,7 +1014,7 @@ collective_group_write(void) * datasets independently. */ void -independent_group_read(void) +independent_group_read(const void *params) { int mpi_rank, m; hid_t plist, fid; @@ -1020,7 +1023,7 @@ independent_group_read(void) int ngroups; herr_t ret; - pt = GetTestParameters(); + pt = params; filename = pt->name; ngroups = pt->count; @@ -1139,7 +1142,7 @@ group_dataset_read(hid_t fid, int mpi_rank, int m) * */ void -multiple_group_write(void) +multiple_group_write(const void *params) { int mpi_rank, mpi_size, size; int m; @@ -1152,7 +1155,7 @@ multiple_group_write(void) char *filename; int ngroups; - pt = GetTestParameters(); + pt = params; filename = pt->name; ngroups = pt->count; @@ -1308,7 +1311,7 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter) * every dataset in every group and check their correctness. */ void -multiple_group_read(void) +multiple_group_read(const void *params) { int mpi_rank, mpi_size, error_num, size; int m; @@ -1320,7 +1323,7 @@ multiple_group_read(void) char *filename; int ngroups; - pt = GetTestParameters(); + pt = params; filename = pt->name; ngroups = pt->count; @@ -1617,7 +1620,7 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t #define N 4 void -io_mode_confusion(void) +io_mode_confusion(const void *params) { /* * HDF5 APIs definitions @@ -1650,7 +1653,7 @@ io_mode_confusion(void) const H5Ptest_param_t *pt; char *filename; - pt = GetTestParameters(); + pt = params; filename = pt->name; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -1894,10 +1897,10 @@ io_mode_confusion(void) const char *dataset_name[NUM_DATA_SETS] = {"dataset_0", "dataset_1", "dataset_2", "dataset_3"}; const char *att_name[NUM_DATA_SETS] = {"attribute_0", "attribute_1", "attribute_2", "attribute_3"}; const char *lg_att_name[NUM_DATA_SETS] = {"large_attribute_0", "large_attribute_1", "large_attribute_2", - "large_attribute_3"}; + "large_attribute_3"}; void -rr_obj_hdr_flush_confusion(void) +rr_obj_hdr_flush_confusion(const void *params) { /* MPI variables */ /* private communicator size and rank */ @@ -1946,9 +1949,9 @@ rr_obj_hdr_flush_confusion(void) * step. When all steps are done, they inform readers to end. 
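Note: the rr_obj_hdr_flush_confusion halves above become static helpers that take the parameter block ahead of their private communicator, so nothing in the call chain touches GetTestParameters() any more. A sketch of that helper shape, with a hypothetical name:

/* Hypothetical split-role helper: the opaque parameter block is forwarded
 * first, followed by the extra state the helper needs (here, the private
 * communicator carved out by the parent test). */
static void
example_role_helper(const void *params, MPI_Comm comm)
{
    const H5Ptest_param_t *pt       = params;
    const char            *filename = pt->name;
    int                    comm_rank;

    MPI_Comm_rank(comm, &comm_rank);

    if (VERBOSE_MED)
        printf("rank %d in sub-communicator working on %s\n", comm_rank, filename);

    /* ... per-role reads or writes against 'filename' over 'comm' ... */
}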
*/ if (is_reader) - rr_obj_hdr_flush_confusion_reader(comm); + rr_obj_hdr_flush_confusion_reader(params, comm); else - rr_obj_hdr_flush_confusion_writer(comm); + rr_obj_hdr_flush_confusion_writer(params, comm); MPI_Comm_free(&comm); if (verbose) @@ -1958,8 +1961,8 @@ rr_obj_hdr_flush_confusion(void) } /* rr_obj_hdr_flush_confusion() */ -void -rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) +static void +rr_obj_hdr_flush_confusion_writer(const void *params, MPI_Comm comm) { int i; int j; @@ -2008,7 +2011,7 @@ rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) * setup test bed related variables: */ - pt = (const H5Ptest_param_t *)GetTestParameters(); + pt = params; filename = pt->name; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank); @@ -2339,8 +2342,8 @@ rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) } /* rr_obj_hdr_flush_confusion_writer() */ -void -rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) +static void +rr_obj_hdr_flush_confusion_reader(const void *params, MPI_Comm comm) { int i; int j; @@ -2387,7 +2390,7 @@ rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) * setup test bed related variables: */ - pt = (const H5Ptest_param_t *)GetTestParameters(); + pt = params; filename = pt->name; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank); @@ -2702,7 +2705,7 @@ rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) #define EXTRA_ALIGN 100 void -chunk_align_bug_1(void) +chunk_align_bug_1(const void *params) { int mpi_rank; hid_t file_id, dset_id, fapl_id, dcpl_id, space_id; @@ -2726,7 +2729,7 @@ chunk_align_bug_1(void) return; } - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; /* Create file without alignment */ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c index c0dabf54431..e26dbe63e92 100644 --- a/testpar/t_mpi.c +++ b/testpar/t_mpi.c @@ -26,6 +26,9 @@ #include "testpar.h" +/* Include testing framework functionality */ +#include "testframe.h" + /* FILENAME and filenames must have the same number of names */ const char *FILENAME[2] = {"MPItest", NULL}; char filenames[2][200]; @@ -175,6 +178,8 @@ test_mpio_overlap_writes(char *filename) #define GB 1073741824 /* 1024**3 == 2**30 */ #define TWO_GB_LESS1 2147483647 /* 2**31 - 1 */ #define FOUR_GB_LESS1 4294967295L /* 2**32 - 1 */ + +#ifndef H5_HAVE_WIN32_API /* * Verify that MPI_Offset exceeding 2**31 can be computed correctly. * Print any failure as information only, not as an error so that this @@ -409,6 +414,7 @@ test_mpio_gb_file(char *filename) free(buf); return (nerrs); } +#endif /* * MPI-IO Test: One writes, Many reads. @@ -1008,8 +1014,10 @@ parse_options(int argc, char **argv) else { switch (*(*argv + 1)) { case 'v': - if (*((*argv + 1) + 1)) - ParseTestVerbosity((*argv + 1) + 1); + if (*((*argv + 1) + 1)) { + if (ParseTestVerbosity((*argv + 1) + 1) < 0) + return 1; + } else SetTestVerbosity(VERBO_MED); break; @@ -1120,7 +1128,12 @@ main(int argc, char **argv) H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* set alarm. 
*/ - TestAlarmOn(); + if (TestAlarmOn() < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't enable test timer\n"); + fflush(stderr); + MPI_Abort(MPI_COMM_WORLD, -1); + } /*======================================= * MPIO 1 write Many read test diff --git a/testpar/t_oflush.c b/testpar/t_oflush.c index 4a91be17719..70257a40f85 100644 --- a/testpar/t_oflush.c +++ b/testpar/t_oflush.c @@ -25,7 +25,7 @@ #define RANK 2 void -test_oflush(void) +test_oflush(const void *params) { int mpi_size, mpi_rank; hid_t file, dataset; @@ -50,7 +50,7 @@ test_oflush(void) for (i = 0; i < NY; i++) data[j][i] = i + j; - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); VRFY((file >= 0), "file creation succeeded"); diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c index 7fdefeb3ee9..6a3909173d4 100644 --- a/testpar/t_ph5basic.c +++ b/testpar/t_ph5basic.c @@ -28,7 +28,7 @@ *------------------------------------------------------------------------- */ void -test_fapl_mpio_dup(void) +test_fapl_mpio_dup(const void H5_ATTR_UNUSED *params) { int mpi_size, mpi_rank; MPI_Comm comm, comm_tmp; @@ -190,7 +190,7 @@ test_fapl_mpio_dup(void) *------------------------------------------------------------------------- */ void -test_get_dxpl_mpio(void) +test_get_dxpl_mpio(const void *params) { hid_t fid = H5I_INVALID_HID; hid_t sid = H5I_INVALID_HID; @@ -222,7 +222,7 @@ test_get_dxpl_mpio(void) VRFY((fapl >= 0), "Fapl creation succeeded"); /* Create a file */ - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); VRFY((fid >= 0), "H5Fcreate succeeded"); diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c index 4fd7b5a0231..a2335fb6f53 100644 --- a/testpar/t_prestart.c +++ b/testpar/t_prestart.c @@ -15,7 +15,14 @@ * and makes sure the objects created are there. */ -#include "testphdf5.h" +#include "testpar.h" + +#define RANK 2 +#define ROW_FACTOR 8 /* Nominal row factor for dataset size */ +#define COL_FACTOR 16 /* Nominal column factor for dataset size */ + +/* Dataset data type. Int's can be easily octo dumped. */ +typedef int DATATYPE; int nerrors = 0; /* errors count */ @@ -115,8 +122,6 @@ main(int argc, char **argv) if (data_array) free(data_array); - nerrors += GetTestNumErrs(); - if (MAINPROCESS) { if (0 == nerrors) PASSED(); diff --git a/testpar/t_prop.c b/testpar/t_prop.c index af7b9a891f8..789796b7ebd 100644 --- a/testpar/t_prop.c +++ b/testpar/t_prop.c @@ -88,7 +88,7 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc) } void -test_plist_ed(void) +test_plist_ed(const void H5_ATTR_UNUSED *params) { hid_t dcpl; /* dataset create prop. list */ hid_t dapl; /* dataset access prop. list */ @@ -451,7 +451,7 @@ test_plist_ed(void) } void -external_links(void) +external_links(const void H5_ATTR_UNUSED *params) { hid_t lcpl = H5I_INVALID_HID; /* link create prop. list */ hid_t lapl = H5I_INVALID_HID; /* link access prop. list */ diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c index 92f0bf17429..2133c9f7a26 100644 --- a/testpar/t_pshutdown.c +++ b/testpar/t_pshutdown.c @@ -19,7 +19,14 @@ * all created objects are there. */ -#include "testphdf5.h" +#include "testpar.h" + +#define RANK 2 +#define ROW_FACTOR 8 /* Nominal row factor for dataset size */ +#define COL_FACTOR 16 /* Nominal column factor for dataset size */ + +/* Dataset data type. Int's can be easily octo dumped. 
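Note: tests that never look at their parameters (test_fapl_mpio_dup, test_plist_ed, external_links, and file_image_daisy_chain_test above) still adopt the common callback signature and tag the argument with H5_ATTR_UNUSED. A minimal sketch with a hypothetical name:

/* Hypothetical parameter-free test: it must still match the framework's
 * callback signature, so the unused argument is tagged to avoid
 * unused-parameter warnings. */
void
example_no_param_test(const void H5_ATTR_UNUSED *params)
{
    int mpi_size, mpi_rank;

    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    if (VERBOSE_MED)
        printf("rank %d of %d running the parameter-free example\n", mpi_rank, mpi_size);

    /* ... body that exercises property lists, communicators, etc.,
     *     without touching any registered parameters ... */
}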
*/ +typedef int DATATYPE; int nerrors = 0; /* errors count */ @@ -156,8 +163,6 @@ main(int argc, char **argv) MPI_Finalize(); - nerrors += GetTestNumErrs(); - if (MAINPROCESS) { if (0 == nerrors) PASSED(); diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c index 8aeed30e078..d841e7e9269 100644 --- a/testpar/t_shapesame.c +++ b/testpar/t_shapesame.c @@ -22,12 +22,19 @@ #define H5S_TESTING #include "H5Spkg.h" /* Dataspaces */ -#include "testphdf5.h" + +#include "testpar.h" + +/* Include testing framework functionality */ +#include "testframe.h" #ifndef PATH_MAX #define PATH_MAX 512 #endif +#define ROW_FACTOR 8 /* Nominal row factor for dataset size */ +#define COL_FACTOR 16 /* Nominal column factor for dataset size */ + /* FILENAME and filenames must have the same number of names. * Use PARATESTFILE in general and use a separated filename only if the file * created in one test is accessed by a different test. @@ -39,6 +46,21 @@ const char *FILENAME[NFILENAME] = {"ShapeSameTest", NULL}; char *filenames[NFILENAME]; hid_t fapl; /* file access property list */ +/* global variables */ +int dim0; +int dim1; +int chunkdim0; +int chunkdim1; +int nerrors = 0; /* errors count */ +int ndatasets = 300; /* number of datasets to create*/ +int ngroups = 512; /* number of groups to create in root + * group. */ +int facc_type = FACC_MPIO; /*Test file access type */ +int dxfer_coll_type = DXFER_COLLECTIVE_IO; + +H5E_auto2_t old_func; /* previous error handler */ +void *old_client_data; /* previous error handler arg.*/ + /* On Lustre (and perhaps other parallel file systems?), we have severe * slow downs if two or more processes attempt to access the same file system * block. To minimize this problem, we set alignment in the shape same tests @@ -111,6 +133,11 @@ struct hs_dr_pio_test_vars_t { int64_t tests_skipped; }; +/* Structure for passing test parameters around */ +typedef struct test_params_t { + char *filename; +} test_params_t; + /*------------------------------------------------------------------------- * Function: hs_dr_pio_test__setup() * @@ -125,10 +152,10 @@ struct hs_dr_pio_test_vars_t { #define CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG 0 static void -hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size, - const int chunk_edge_size, const int small_rank, const int large_rank, - const bool use_collective_io, const hid_t dset_type, const int express_test, - struct hs_dr_pio_test_vars_t *tv_ptr) +hs_dr_pio_test__setup(const void *params, const int test_num, const int edge_size, + const int checker_edge_size, const int chunk_edge_size, const int small_rank, + const int large_rank, const bool use_collective_io, const hid_t dset_type, + const int express_test, struct hs_dr_pio_test_vars_t *tv_ptr) { #if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG const char *fcnName = "hs_dr_pio_test__setup()"; @@ -246,7 +273,7 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker memset(tv_ptr->large_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->large_ds_slice_size); - filename = (const char *)GetTestParameters(); + filename = ((const test_params_t *)params)->filename; assert(filename != NULL); #if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG if (MAINPROCESS) { @@ -1697,11 +1724,11 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) #define CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0 static void -contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size, - const int small_rank, const int large_rank, const bool 
use_collective_io, - const hid_t dset_type, int express_test, int *skips_ptr, int max_skips, - int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, - int mpi_rank) +contig_hs_dr_pio_test__run_test(const void *params, const int test_num, const int edge_size, + const int chunk_edge_size, const int small_rank, const int large_rank, + const bool use_collective_io, const hid_t dset_type, int express_test, + int *skips_ptr, int max_skips, int64_t *total_tests_ptr, + int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, int mpi_rank) { #if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG const char *fcnName = "contig_hs_dr_pio_test__run_test()"; @@ -1771,8 +1798,8 @@ contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1), small_rank, large_rank); - hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io, - dset_type, express_test, tv_ptr); + hs_dr_pio_test__setup(params, test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, + use_collective_io, dset_type, express_test, tv_ptr); /* initialize skips & max_skips */ tv_ptr->skips = *skips_ptr; @@ -1885,7 +1912,7 @@ contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i #define CONTIG_HS_DR_PIO_TEST__DEBUG 0 static void -contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) +contig_hs_dr_pio_test(const void *params, ShapeSameTestMethods sstest_type) { int express_test; int local_express_test; @@ -1943,9 +1970,10 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) /* contiguous data set, independent I/O */ chunk_edge_size = 0; - contig_hs_dr_pio_test__run_test( - test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type, - express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); + contig_hs_dr_pio_test__run_test(params, test_num, edge_size, chunk_edge_size, small_rank, + large_rank, false, dset_type, express_test, &skips, + max_skips, &total_tests, &tests_run, &tests_skipped, + mpi_rank); test_num++; break; /* end of case IND_CONTIG */ @@ -1955,7 +1983,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) chunk_edge_size = 0; contig_hs_dr_pio_test__run_test( - test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type, + params, test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); test_num++; break; @@ -1965,9 +1993,10 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) /* chunked data set, independent I/O */ chunk_edge_size = 5; - contig_hs_dr_pio_test__run_test( - test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type, - express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); + contig_hs_dr_pio_test__run_test(params, test_num, edge_size, chunk_edge_size, small_rank, + large_rank, false, dset_type, express_test, &skips, + max_skips, &total_tests, &tests_run, &tests_skipped, + mpi_rank); test_num++; break; /* end of case IND_CHUNKED */ @@ -1977,7 +2006,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) chunk_edge_size = 5; contig_hs_dr_pio_test__run_test( - test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type, + params, test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type, express_test, &skips, max_skips, &total_tests, 
&tests_run, &tests_skipped, mpi_rank); test_num++; break; @@ -3629,11 +3658,12 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) #define CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0 static void -ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size, - const int chunk_edge_size, const int small_rank, const int large_rank, - const bool use_collective_io, const hid_t dset_type, const int express_test, - int *skips_ptr, int max_skips, int64_t *total_tests_ptr, - int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, int mpi_rank) +ckrbrd_hs_dr_pio_test__run_test(const void *params, const int test_num, const int edge_size, + const int checker_edge_size, const int chunk_edge_size, const int small_rank, + const int large_rank, const bool use_collective_io, const hid_t dset_type, + const int express_test, int *skips_ptr, int max_skips, + int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, + int mpi_rank) { #if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG @@ -3704,8 +3734,8 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1), small_rank, large_rank); - hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, - use_collective_io, dset_type, express_test, tv_ptr); + hs_dr_pio_test__setup(params, test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, + large_rank, use_collective_io, dset_type, express_test, tv_ptr); /* initialize skips & max_skips */ tv_ptr->skips = *skips_ptr; @@ -3800,7 +3830,7 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i */ static void -ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) +ckrbrd_hs_dr_pio_test(const void *params, ShapeSameTestMethods sstest_type) { int express_test; int local_express_test; @@ -3865,9 +3895,9 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) case IND_CONTIG: /* contiguous data set, independent I/O */ chunk_edge_size = 0; - ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, - small_rank, large_rank, false, dset_type, express_test, - &skips, max_skips, &total_tests, &tests_run, + ckrbrd_hs_dr_pio_test__run_test(params, test_num, edge_size, checker_edge_size, + chunk_edge_size, small_rank, large_rank, false, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); test_num++; break; @@ -3876,9 +3906,9 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) case COL_CONTIG: /* contiguous data set, collective I/O */ chunk_edge_size = 0; - ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, - small_rank, large_rank, true, dset_type, express_test, - &skips, max_skips, &total_tests, &tests_run, + ckrbrd_hs_dr_pio_test__run_test(params, test_num, edge_size, checker_edge_size, + chunk_edge_size, small_rank, large_rank, true, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); test_num++; break; @@ -3887,9 +3917,9 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) case IND_CHUNKED: /* chunked data set, independent I/O */ chunk_edge_size = 5; - ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, - small_rank, large_rank, false, dset_type, express_test, - &skips, max_skips, &total_tests, &tests_run, + ckrbrd_hs_dr_pio_test__run_test(params, 
test_num, edge_size, checker_edge_size, + chunk_edge_size, small_rank, large_rank, false, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); test_num++; break; @@ -3898,9 +3928,9 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) case COL_CHUNKED: /* chunked data set, collective I/O */ chunk_edge_size = 5; - ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, - small_rank, large_rank, true, dset_type, express_test, - &skips, max_skips, &total_tests, &tests_run, + ckrbrd_hs_dr_pio_test__run_test(params, test_num, edge_size, checker_edge_size, + chunk_edge_size, small_rank, large_rank, true, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); test_num++; break; @@ -3939,21 +3969,6 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) * Main driver of the Parallel HDF5 tests */ -/* global variables */ -int dim0; -int dim1; -int chunkdim0; -int chunkdim1; -int nerrors = 0; /* errors count */ -int ndatasets = 300; /* number of datasets to create*/ -int ngroups = 512; /* number of groups to create in root - * group. */ -int facc_type = FACC_MPIO; /*Test file access type */ -int dxfer_coll_type = DXFER_COLLECTIVE_IO; - -H5E_auto2_t old_func; /* previous error handler */ -void *old_client_data; /* previous error handler arg.*/ - /* other option flags */ #ifdef USE_PAUSE @@ -4015,20 +4030,20 @@ MPI_Init(int *argc, char ***argv) * Show command usage */ static void -usage(void) +usage(FILE *stream) { - printf(" [-r] [-w] [-m] [-n] " - "[-o] [-f ] [-d ]\n"); - printf("\t-m" - "\tset number of datasets for the multiple dataset test\n"); - printf("\t-n" - "\tset number of groups for the multiple group test\n"); - printf("\t-f \tfilename prefix\n"); - printf("\t-2\t\tuse Split-file together with MPIO\n"); - printf("\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, - COL_FACTOR); - printf("\t-c \tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); - printf("\n"); + fprintf(stream, " [-r] [-w] [-m] [-n] " + "[-o] [-f ] [-d ]\n"); + fprintf(stream, "\t-m" + "\tset number of datasets for the multiple dataset test\n"); + fprintf(stream, "\t-n" + "\tset number of groups for the multiple group test\n"); + fprintf(stream, "\t-f \tfilename prefix\n"); + fprintf(stream, "\t-2\t\tuse Split-file together with MPIO\n"); + fprintf(stream, "\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, + COL_FACTOR); + fprintf(stream, "\t-c \tdataset chunk dimensions. 
Defaults (dim0/10,dim1/10)\n"); + fprintf(stream, "\n"); } /* @@ -4162,120 +4177,68 @@ parse_options(int argc, char **argv) return (0); } -/* - * Create the appropriate File access property list - */ -hid_t -create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) -{ - hid_t ret_pl = H5I_INVALID_HID; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ - - /* need the rank for error checking macros */ - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); - - if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO) { - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), ""); - ret = H5Pset_all_coll_metadata_ops(ret_pl, true); - VRFY((ret >= 0), ""); - ret = H5Pset_coll_metadata_write(ret_pl, true); - VRFY((ret >= 0), ""); - return (ret_pl); - } - - if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { - hid_t mpio_pl; - - mpio_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return (ret_pl); - } - - /* unknown file access types */ - return (ret_pl); -} - /* Shape Same test using contiguous hyperslab using independent IO on contiguous datasets */ static void -sscontig1(void) +sscontig1(const void *params) { - contig_hs_dr_pio_test(IND_CONTIG); + contig_hs_dr_pio_test(params, IND_CONTIG); } /* Shape Same test using contiguous hyperslab using collective IO on contiguous datasets */ static void -sscontig2(void) +sscontig2(const void *params) { - contig_hs_dr_pio_test(COL_CONTIG); + contig_hs_dr_pio_test(params, COL_CONTIG); } /* Shape Same test using contiguous hyperslab using independent IO on chunked datasets */ static void -sscontig3(void) +sscontig3(const void *params) { - contig_hs_dr_pio_test(IND_CHUNKED); + contig_hs_dr_pio_test(params, IND_CHUNKED); } /* Shape Same test using contiguous hyperslab using collective IO on chunked datasets */ static void -sscontig4(void) +sscontig4(const void *params) { - contig_hs_dr_pio_test(COL_CHUNKED); + contig_hs_dr_pio_test(params, COL_CHUNKED); } /* Shape Same test using checker hyperslab using independent IO on contiguous datasets */ static void -sschecker1(void) +sschecker1(const void *params) { - ckrbrd_hs_dr_pio_test(IND_CONTIG); + ckrbrd_hs_dr_pio_test(params, IND_CONTIG); } /* Shape Same test using checker hyperslab using collective IO on contiguous datasets */ static void -sschecker2(void) +sschecker2(const void *params) { - ckrbrd_hs_dr_pio_test(COL_CONTIG); + ckrbrd_hs_dr_pio_test(params, COL_CONTIG); } /* Shape Same test using checker hyperslab using independent IO on chunked datasets */ static void -sschecker3(void) +sschecker3(const void *params) { - ckrbrd_hs_dr_pio_test(IND_CHUNKED); + ckrbrd_hs_dr_pio_test(params, IND_CHUNKED); } /* Shape Same test using checker hyperslab using collective IO on chunked datasets */ static void -sschecker4(void) +sschecker4(const void *params) { - ckrbrd_hs_dr_pio_test(COL_CHUNKED); + ckrbrd_hs_dr_pio_test(params, COL_CHUNKED); } int main(int argc, char **argv) { - int mpi_size, mpi_rank; /* mpi variables 
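Note: t_shapesame.c now carries its own small parameter block (the test_params_t struct added earlier in this file) rather than the shared H5Ptest_param_t, and the sscontig/sschecker wrappers above forward it untouched. A sketch of both ends of that contract, with a hypothetical helper name:

/* File-local parameter block (matches the definition added earlier in this
 * file). */
typedef struct test_params_t {
    char *filename;
} test_params_t;

/* Hypothetical helper showing how the shape-same tests recover it: the
 * framework passes the block back as the same opaque pointer. */
static void
example_shape_same_piece(const void *params)
{
    const char *filename = ((const test_params_t *)params)->filename;

    /* ... create and verify the small and large datasets in 'filename' ... */
    (void)filename;
}

main() below fills test_params.filename = PARATESTFILE once and hands &test_params together with sizeof(test_params) to every AddTest() registration.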
*/ - int mpi_code; + test_params_t test_params; + int mpi_size, mpi_rank; /* mpi variables */ + int mpi_code; #ifdef H5_HAVE_TEST_API int required = MPI_THREAD_MULTIPLE; int provided; @@ -4319,8 +4282,6 @@ main(int argc, char **argv) return -1; } - mpi_rank_framework_g = mpi_rank; - dim0 = ROW_FACTOR * mpi_size; dim1 = COL_FACTOR * mpi_size; @@ -4374,28 +4335,51 @@ main(int argc, char **argv) } /* Initialize testing framework */ - TestInit(argv[0], usage, parse_options); + if (TestInit(argv[0], usage, parse_options, NULL, NULL, mpi_rank) < 0) { + if (MAINPROCESS) { + fprintf(stderr, "couldn't initialize testing framework\n"); + fflush(stderr); + } + + MPI_Finalize(); + return -1; + } + + test_params.filename = PARATESTFILE; /* Shape Same tests using contiguous hyperslab */ - AddTest("sscontig1", sscontig1, NULL, "Cntg hslab, ind IO, cntg dsets", PARATESTFILE); - AddTest("sscontig2", sscontig2, NULL, "Cntg hslab, col IO, cntg dsets", PARATESTFILE); - AddTest("sscontig3", sscontig3, NULL, "Cntg hslab, ind IO, chnk dsets", PARATESTFILE); - AddTest("sscontig4", sscontig4, NULL, "Cntg hslab, col IO, chnk dsets", PARATESTFILE); + AddTest("sscontig1", sscontig1, NULL, NULL, &test_params, sizeof(test_params), + "Cntg hslab, ind IO, cntg dsets"); + AddTest("sscontig2", sscontig2, NULL, NULL, &test_params, sizeof(test_params), + "Cntg hslab, col IO, cntg dsets"); + AddTest("sscontig3", sscontig3, NULL, NULL, &test_params, sizeof(test_params), + "Cntg hslab, ind IO, chnk dsets"); + AddTest("sscontig4", sscontig4, NULL, NULL, &test_params, sizeof(test_params), + "Cntg hslab, col IO, chnk dsets"); /* Shape Same tests using checker board hyperslab */ - AddTest("sschecker1", sschecker1, NULL, "Check hslab, ind IO, cntg dsets", PARATESTFILE); - AddTest("sschecker2", sschecker2, NULL, "Check hslab, col IO, cntg dsets", PARATESTFILE); - AddTest("sschecker3", sschecker3, NULL, "Check hslab, ind IO, chnk dsets", PARATESTFILE); - AddTest("sschecker4", sschecker4, NULL, "Check hslab, col IO, chnk dsets", PARATESTFILE); + AddTest("sschecker1", sschecker1, NULL, NULL, &test_params, sizeof(test_params), + "Check hslab, ind IO, cntg dsets"); + AddTest("sschecker2", sschecker2, NULL, NULL, &test_params, sizeof(test_params), + "Check hslab, col IO, cntg dsets"); + AddTest("sschecker3", sschecker3, NULL, NULL, &test_params, sizeof(test_params), + "Check hslab, ind IO, chnk dsets"); + AddTest("sschecker4", sschecker4, NULL, NULL, &test_params, sizeof(test_params), + "Check hslab, col IO, chnk dsets"); /* Display testing information */ - TestInfo(argv[0]); + TestInfo(stdout); /* setup file access property list */ H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* Parse command line arguments */ - TestParseCmdLine(argc, argv); + if (TestParseCmdLine(argc, argv) < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't parse command-line arguments\n"); + TestShutdown(); + MPI_Abort(MPI_COMM_WORLD, -1); + } if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) { printf("===================================\n" @@ -4404,7 +4388,12 @@ main(int argc, char **argv) } /* Perform requested testing */ - PerformTests(); + if (PerformTests() < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't run tests\n"); + TestShutdown(); + MPI_Abort(MPI_COMM_WORLD, -1); + } /* make sure all processes are finished before final report, cleanup * and exit. 
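Note: the main() hunk above strings the new framework entry points together, each of which now reports failure instead of exiting on its own. A condensed sketch of the resulting flow; usage() and parse_options() are the program's own callbacks, the NULL arguments to TestInit() are left as in the hunk, and fapl setup, test-file cleanup, and error accounting are elided (the summary and shutdown calls appear in the next hunk).

int
main(int argc, char **argv)
{
    test_params_t test_params;
    int           mpi_rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* Every framework entry point now reports failure instead of aborting
     * internally, so the driver decides how to shut the MPI job down. */
    if (TestInit(argv[0], usage, parse_options, NULL, NULL, mpi_rank) < 0) {
        if (mpi_rank == 0)
            fprintf(stderr, "couldn't initialize testing framework\n");
        MPI_Finalize();
        return -1;
    }

    /* Register tests; the parameter block is passed by address with its size */
    test_params.filename = PARATESTFILE;
    AddTest("sscontig1", sscontig1, NULL, NULL, &test_params, sizeof(test_params),
            "Cntg hslab, ind IO, cntg dsets");
    /* ... remaining AddTest() registrations ... */

    TestInfo(stdout);

    if (TestParseCmdLine(argc, argv) < 0 || PerformTests() < 0) {
        TestShutdown();
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    if (mpi_rank == 0 && GetTestSummary())
        TestSummary(stdout);

    /* ... test-file cleanup and error accounting ... */

    if (TestShutdown() < 0)
        MPI_Abort(MPI_COMM_WORLD, -1);

    MPI_Finalize();
    return 0;
}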
@@ -4413,7 +4402,7 @@ main(int argc, char **argv) /* Display test summary, if requested */ if (MAINPROCESS && GetTestSummary()) - TestSummary(); + TestSummary(stdout); /* Clean up test files */ h5_delete_all_test_files(FILENAME, fapl); @@ -4446,7 +4435,11 @@ main(int argc, char **argv) H5close(); /* Release test infrastructure */ - TestShutdown(); + if (TestShutdown() < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't shut down testing framework\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } MPI_Finalize(); diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c index b381ef5d77c..1541bf6b894 100644 --- a/testpar/t_span_tree.c +++ b/testpar/t_span_tree.c @@ -36,8 +36,8 @@ #define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0 -static void coll_write_test(int chunk_factor); -static void coll_read_test(void); +static void coll_write_test(const void *params, int chunk_factor); +static void coll_read_test(const void *params); /*------------------------------------------------------------------------- * Function: coll_irregular_cont_write @@ -52,7 +52,7 @@ static void coll_read_test(void); *------------------------------------------------------------------------- */ void -coll_irregular_cont_write(void) +coll_irregular_cont_write(const void *params) { int mpi_rank; @@ -71,7 +71,7 @@ coll_irregular_cont_write(void) return; } - coll_write_test(0); + coll_write_test(params, 0); } /*------------------------------------------------------------------------- @@ -87,7 +87,7 @@ coll_irregular_cont_write(void) *------------------------------------------------------------------------- */ void -coll_irregular_cont_read(void) +coll_irregular_cont_read(const void *params) { int mpi_rank; @@ -106,7 +106,7 @@ coll_irregular_cont_read(void) return; } - coll_read_test(); + coll_read_test(params); } /*------------------------------------------------------------------------- @@ -122,7 +122,7 @@ coll_irregular_cont_read(void) *------------------------------------------------------------------------- */ void -coll_irregular_simple_chunk_write(void) +coll_irregular_simple_chunk_write(const void *params) { int mpi_rank; @@ -141,7 +141,7 @@ coll_irregular_simple_chunk_write(void) return; } - coll_write_test(1); + coll_write_test(params, 1); } /*------------------------------------------------------------------------- @@ -157,7 +157,7 @@ coll_irregular_simple_chunk_write(void) *------------------------------------------------------------------------- */ void -coll_irregular_simple_chunk_read(void) +coll_irregular_simple_chunk_read(const void *params) { int mpi_rank; @@ -176,7 +176,7 @@ coll_irregular_simple_chunk_read(void) return; } - coll_read_test(); + coll_read_test(params); } /*------------------------------------------------------------------------- @@ -192,7 +192,7 @@ coll_irregular_simple_chunk_read(void) *------------------------------------------------------------------------- */ void -coll_irregular_complex_chunk_write(void) +coll_irregular_complex_chunk_write(const void *params) { int mpi_rank; @@ -211,7 +211,7 @@ coll_irregular_complex_chunk_write(void) return; } - coll_write_test(4); + coll_write_test(params, 4); } /*------------------------------------------------------------------------- @@ -227,7 +227,7 @@ coll_irregular_complex_chunk_write(void) *------------------------------------------------------------------------- */ void -coll_irregular_complex_chunk_read(void) +coll_irregular_complex_chunk_read(const void *params) { int mpi_rank; @@ -246,7 +246,7 @@ coll_irregular_complex_chunk_read(void) return; 
} - coll_read_test(); + coll_read_test(params); } /*------------------------------------------------------------------------- @@ -263,7 +263,7 @@ coll_irregular_complex_chunk_read(void) *------------------------------------------------------------------------- */ void -coll_write_test(int chunk_factor) +coll_write_test(const void *params, int chunk_factor) { const char *filename; @@ -301,7 +301,7 @@ coll_write_test(int chunk_factor) MPI_Comm_rank(comm, &mpi_rank); /* Obtain file name */ - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; /* * Buffers' initialization. @@ -717,7 +717,7 @@ coll_write_test(int chunk_factor) *------------------------------------------------------------------------- */ static void -coll_read_test(void) +coll_read_test(const void *params) { const char *filename; @@ -751,7 +751,7 @@ coll_read_test(void) MPI_Comm_rank(comm, &mpi_rank); /* Obtain file name */ - filename = GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; /* Initialize the buffer */ @@ -1504,8 +1504,8 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr, #define LDSCT_DS_RANK 5 static void -lower_dim_size_comp_test__run_test(const int chunk_edge_size, const bool use_collective_io, - const hid_t dset_type) +lower_dim_size_comp_test__run_test(const void *params, const int chunk_edge_size, + const bool use_collective_io, const hid_t dset_type) { #if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG const char *fcnName = "lower_dim_size_comp_test__run_test()"; @@ -1636,7 +1636,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const bool use_col /* get the file name */ - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; assert(filename != NULL); /* ---------------------------------------- @@ -2349,7 +2349,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const bool use_col */ void -lower_dim_size_comp_test(void) +lower_dim_size_comp_test(const void *params) { /* const char *fcnName = "lower_dim_size_comp_test()"; */ int chunk_edge_size = 0; @@ -2372,10 +2372,10 @@ lower_dim_size_comp_test(void) HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) { chunk_edge_size = 0; - lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT); + lower_dim_size_comp_test__run_test(params, chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT); chunk_edge_size = 5; - lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT); + lower_dim_size_comp_test__run_test(params, chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT); } /* end for */ return; @@ -2411,7 +2411,7 @@ lower_dim_size_comp_test(void) #define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16 void -link_chunk_collective_io_test(void) +link_chunk_collective_io_test(const void *params) { /* const char *fcnName = "link_chunk_collective_io_test()"; */ const char *filename; @@ -2459,7 +2459,7 @@ link_chunk_collective_io_test(void) assert(mpi_size > 0); /* get the file name */ - filename = (const char *)GetTestParameters(); + filename = ((const H5Ptest_param_t *)params)->name; assert(filename != NULL); /* setup file access template */ diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index aa2da851eaa..68ae70c8ade 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -27,6 +27,9 @@ #ifdef H5_HAVE_SUBFILING_VFD +/* Include testing framework 
functionality -- currently just for test alarm timer */ +#include "testframe.h" + #include "H5FDsubfiling.h" #include "H5FDioc.h" @@ -3132,7 +3135,12 @@ main(int argc, char **argv) printf("Testing Subfiling VFD functionality\n"); } - TestAlarmOn(); + if (TestAlarmOn() < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't enable test timer\n"); + nerrors++; + goto exit; + } /* * Obtain and broadcast seed value since ranks diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c index 3b924784ee3..5069577a318 100644 --- a/testpar/t_vfd.c +++ b/testpar/t_vfd.c @@ -14,7 +14,7 @@ * This file is a catchall for parallel VFD tests. */ -#include "testphdf5.h" +#include "testpar.h" #ifdef H5_HAVE_SUBFILING_VFD #include "H5FDsubfiling.h" diff --git a/testpar/testpar.c b/testpar/testpar.c new file mode 100644 index 00000000000..c674f61c5c2 --- /dev/null +++ b/testpar/testpar.c @@ -0,0 +1,107 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: Provides support functions for hdf5 parallel tests. + */ + +#include "testpar.h" + +#define MAX_RANK 2 + +/* + * Create the appropriate File access property list + */ +hid_t +create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) +{ + hid_t ret_pl = H5I_INVALID_HID; + herr_t ret; /* generic return value */ + int mpi_rank; /* mpi variables */ + + /* need the rank for error checking macros */ + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + if ((ret_pl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + return H5I_INVALID_HID; + + if (l_facc_type == FACC_DEFAULT) + return ret_pl; + + if (l_facc_type == FACC_MPIO) { + /* set Parallel access with communicator */ + if ((ret = H5Pset_fapl_mpio(ret_pl, comm, info)) < 0) + return H5I_INVALID_HID; + if ((ret = H5Pset_all_coll_metadata_ops(ret_pl, true)) < 0) + return H5I_INVALID_HID; + if ((ret = H5Pset_coll_metadata_write(ret_pl, true)) < 0) + return H5I_INVALID_HID; + return ret_pl; + } + + if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { + hid_t mpio_pl; + + if ((mpio_pl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + return H5I_INVALID_HID; + /* set Parallel access with communicator */ + if ((ret = H5Pset_fapl_mpio(mpio_pl, comm, info)) < 0) + return H5I_INVALID_HID; + + /* setup file access template */ + if ((ret_pl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + return H5I_INVALID_HID; + /* set Parallel access with communicator */ + if ((ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl)) < 0) + return H5I_INVALID_HID; + if (H5Pclose(mpio_pl) < 0) + return H5I_INVALID_HID; + + return ret_pl; + } + + /* unknown file access types */ + return ret_pl; +} + +/* + * Setup the coordinates for point selection. 
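Note: because the relocated create_faccess_plist() in testpar.c above now reports failure by returning H5I_INVALID_HID instead of VRFY-ing internally, the check stays with the caller, which is the pattern the converted tests already use. A minimal caller sketch with a hypothetical function name; facc_type is the per-program file-access global seen throughout these files.

static hid_t
example_open_parallel_file(const char *filename)
{
    hid_t fapl, fid;
    int   mpi_rank;

    /* need the rank for the error checking macros */
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* The shared helper reports failure through its return value, so the
     * caller verifies it instead of relying on an internal abort. */
    fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
    VRFY((fapl >= 0), "create_faccess_plist succeeded");

    fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
    VRFY((fid >= 0), "H5Fopen succeeded");

    VRFY((H5Pclose(fapl) >= 0), "H5Pclose succeeded");

    return fid;
}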
+ */ +void +point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, + hsize_t coords[], int order) +{ + hsize_t i, j, k = 0, m, n, s1, s2; + + HDcompile_assert(MAX_RANK == 2); + + if (OUT_OF_ORDER == order) + k = (num_points * MAX_RANK) - 1; + else if (IN_ORDER == order) + k = 0; + + s1 = start[0]; + s2 = start[1]; + + for (i = 0; i < count[0]; i++) + for (j = 0; j < count[1]; j++) + for (m = 0; m < block[0]; m++) + for (n = 0; n < block[1]; n++) + if (OUT_OF_ORDER == order) { + coords[k--] = s2 + (stride[1] * j) + n; + coords[k--] = s1 + (stride[0] * i) + m; + } + else if (IN_ORDER == order) { + coords[k++] = s1 + stride[0] * i + m; + coords[k++] = s2 + stride[1] * j + n; + } +} diff --git a/testpar/testpar.h b/testpar/testpar.h index ca0f608d82e..71ff72a5f13 100644 --- a/testpar/testpar.h +++ b/testpar/testpar.h @@ -20,7 +20,30 @@ #include "h5test.h" +/* For now, include testing framework functionality since the MESG, VRFY, + * etc. macros depend on the test verbosity level + */ +#include "testframe.h" + +/* File_Access_type bits */ +#define FACC_DEFAULT 0x0 /* default */ +#define FACC_MPIO 0x1 /* MPIO */ +#define FACC_SPLIT 0x2 /* Split File */ + /* Constants definitions */ +#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO */ +#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */ + +/* Hyperslab layout styles */ +#define BYROW 1 /* divide into slabs of rows */ +#define BYCOL 2 /* divide into blocks of columns */ +#define ZROW 3 /* same as BYCOL except process 0 gets 0 rows */ +#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */ + +/* point selection order */ +#define IN_ORDER 1 +#define OUT_OF_ORDER 2 + #define MAX_ERR_REPORT 10 /* Maximum number of errors reported */ /* Define some handy debugging shorthands, routines, ... */ @@ -104,6 +127,40 @@ MPI_BANNER("SYNC DONE"); \ } while (0) +/* Shared enum for some parallel tests that + * contains values to determine how parallel + * I/O is performed + */ +enum H5TEST_COLL_CHUNK_API { + API_NONE = 0, + API_LINK_HARD, + API_MULTI_HARD, + API_LINK_TRUE, + API_LINK_FALSE, + API_MULTI_COLL, + API_MULTI_IND +}; + +/* Shape Same Tests Definitions */ +typedef enum { + IND_CONTIG, /* Independent IO on contiguous datasets */ + COL_CONTIG, /* Collective IO on contiguous datasets */ + IND_CHUNKED, /* Independent IO on chunked datasets */ + COL_CHUNKED /* Collective IO on chunked datasets */ +} ShapeSameTestMethods; + /* End of Define some handy debugging shorthands, routines, ... */ +#ifdef __cplusplus +extern "C" { +#endif + +hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type); + +void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, + hsize_t coords[], int order); + +#ifdef __cplusplus +} +#endif #endif /* TESTPAR_H */ diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index 960a5cf67a3..2f30e722323 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -104,20 +104,20 @@ MPI_Init(int *argc, char ***argv) * Show command usage */ static void -usage(void) +usage(FILE *stream) { - printf(" [-r] [-w] [-m] [-n] " - "[-o] [-f ] [-d ]\n"); - printf("\t-m" - "\tset number of datasets for the multiple dataset test\n"); - printf("\t-n" - "\tset number of groups for the multiple group test\n"); - printf("\t-f \tfilename prefix\n"); - printf("\t-2\t\tuse Split-file together with MPIO\n"); - printf("\t-d \tdataset dimensions factors. 
Defaults (%d,%d)\n", ROW_FACTOR, - COL_FACTOR); - printf("\t-c \tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); - printf("\n"); + fprintf(stream, " [-r] [-w] [-m] [-n] " + "[-o] [-f ] [-d ]\n"); + fprintf(stream, "\t-m" + "\tset number of datasets for the multiple dataset test\n"); + fprintf(stream, "\t-n" + "\tset number of groups for the multiple group test\n"); + fprintf(stream, "\t-f \tfilename prefix\n"); + fprintf(stream, "\t-2\t\tuse Split-file together with MPIO\n"); + fprintf(stream, "\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, + COL_FACTOR); + fprintf(stream, "\t-c \tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); + fprintf(stream, "\n"); } /* @@ -248,68 +248,12 @@ parse_options(int argc, char **argv) return (0); } -/* - * Create the appropriate File access property list - */ -hid_t -create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) -{ - hid_t ret_pl = H5I_INVALID_HID; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ - - /* need the rank for error checking macros */ - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), "H5Pcreate succeeded"); - - if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO) { - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded"); - ret = H5Pset_all_coll_metadata_ops(ret_pl, true); - VRFY((ret >= 0), "H5Pset_all_coll_metadata_ops succeeded"); - ret = H5Pset_coll_metadata_write(ret_pl, true); - VRFY((ret >= 0), "H5Pset_coll_metadata_write succeeded"); - return (ret_pl); - } - - if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { - hid_t mpio_pl; - - mpio_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), "H5Pcreate succeeded"); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded"); - - /* setup file access template */ - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), "H5Pcreate succeeded"); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return (ret_pl); - } - - /* unknown file access types */ - return (ret_pl); -} - int main(int argc, char **argv) { + H5Ptest_param_t test_params; int mpi_size, mpi_rank; /* mpi variables */ int mpi_code; - H5Ptest_param_t ndsets_params, ngroups_params; - H5Ptest_param_t collngroups_params; - H5Ptest_param_t io_mode_confusion_params; - H5Ptest_param_t rr_obj_flush_confusion_params; #ifdef H5_HAVE_TEST_API int required = MPI_THREAD_MULTIPLE; int provided; @@ -353,8 +297,6 @@ main(int argc, char **argv) return -1; } - mpi_rank_framework_g = mpi_rank; - dim0 = ROW_FACTOR * mpi_size; dim1 = COL_FACTOR * mpi_size; @@ -393,153 +335,183 @@ main(int argc, char **argv) VRFY((H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) >= 0), "H5Pget_vol_cap_flags succeeded"); /* Initialize testing framework */ - TestInit(argv[0], usage, parse_options); - - /* Tests are generally arranged from least to most complexity... 
*/ - AddTest("mpiodup", test_fapl_mpio_dup, NULL, "fapl_mpio duplicate", NULL); - AddTest("getdxplmpio", test_get_dxpl_mpio, NULL, "dxpl_mpio get", PARATESTFILE); - - AddTest("split", test_split_comm_access, NULL, "dataset using split communicators", PARATESTFILE); - AddTest("h5oflusherror", test_oflush, NULL, "H5Oflush failure", PARATESTFILE); - - AddTest("page_buffer", test_page_buffer_access, NULL, "page buffer usage in parallel", PARATESTFILE); - - AddTest("props", test_file_properties, NULL, "Coll Metadata file property settings", PARATESTFILE); - - AddTest("delete", test_delete, NULL, "MPI-IO VFD file delete", PARATESTFILE); - - AddTest("invlibverassert", test_invalid_libver_bounds_file_close_assert, NULL, - "Invalid libver bounds assertion failure", PARATESTFILE); - - AddTest("evictparassert", test_evict_on_close_parallel_unsupp, NULL, "Evict on close in parallel failure", - PARATESTFILE); - AddTest("fapl_preserve", test_fapl_preserve_hints, NULL, "preserve MPI I/O hints after fapl closed", - PARATESTFILE); - - AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE); - AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE); + if (TestInit(argv[0], usage, parse_options, NULL, NULL, mpi_rank) < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't initialize testing framework\n"); + MPI_Finalize(); + return -1; + } - AddTest("cdsetw", dataset_writeAll, NULL, "dataset collective write", PARATESTFILE); - AddTest("cdsetr", dataset_readAll, NULL, "dataset collective read", PARATESTFILE); + test_params.name = PARATESTFILE; + test_params.count = 0; - AddTest("eidsetw", extend_writeInd, NULL, "extendible dataset independent write", PARATESTFILE); - AddTest("eidsetr", extend_readInd, NULL, "extendible dataset independent read", PARATESTFILE); - AddTest("ecdsetw", extend_writeAll, NULL, "extendible dataset collective write", PARATESTFILE); - AddTest("ecdsetr", extend_readAll, NULL, "extendible dataset collective read", PARATESTFILE); - AddTest("eidsetw2", extend_writeInd2, NULL, "extendible dataset independent write #2", PARATESTFILE); - AddTest("selnone", none_selection_chunk, NULL, "chunked dataset with none-selection", PARATESTFILE); - AddTest("calloc", test_chunk_alloc, NULL, "parallel extend Chunked allocation on serial file", - PARATESTFILE); - AddTest("chkallocser2par", test_chunk_alloc_incr_ser_to_par, NULL, - "chunk allocation from serial to parallel file access", PARATESTFILE); - AddTest("fltread", test_filter_read, NULL, "parallel read of dataset written serially with filters", - PARATESTFILE); + /* Tests are generally arranged from least to most complexity... 
*/ + AddTest("mpiodup", test_fapl_mpio_dup, NULL, NULL, NULL, 0, "fapl_mpio duplicate"); + AddTest("getdxplmpio", test_get_dxpl_mpio, NULL, NULL, &test_params, sizeof(test_params), + "dxpl_mpio get"); + + AddTest("split", test_split_comm_access, NULL, NULL, &test_params, sizeof(test_params), + "dataset using split communicators"); + AddTest("h5oflusherror", test_oflush, NULL, NULL, &test_params, sizeof(test_params), "H5Oflush failure"); + + AddTest("page_buffer", test_page_buffer_access, NULL, NULL, &test_params, sizeof(test_params), + "page buffer usage in parallel"); + + AddTest("props", test_file_properties, NULL, NULL, &test_params, sizeof(test_params), + "Coll Metadata file property settings"); + + AddTest("delete", test_delete, NULL, NULL, &test_params, sizeof(test_params), "MPI-IO VFD file delete"); + + AddTest("invlibverassert", test_invalid_libver_bounds_file_close_assert, NULL, NULL, &test_params, + sizeof(test_params), "Invalid libver bounds assertion failure"); + + AddTest("evictparassert", test_evict_on_close_parallel_unsupp, NULL, NULL, &test_params, + sizeof(test_params), "Evict on close in parallel failure"); + AddTest("fapl_preserve", test_fapl_preserve_hints, NULL, NULL, &test_params, sizeof(test_params), + "preserve MPI I/O hints after fapl closed"); + + AddTest("idsetw", dataset_writeInd, NULL, NULL, &test_params, sizeof(test_params), + "dataset independent write"); + AddTest("idsetr", dataset_readInd, NULL, NULL, &test_params, sizeof(test_params), + "dataset independent read"); + + AddTest("cdsetw", dataset_writeAll, NULL, NULL, &test_params, sizeof(test_params), + "dataset collective write"); + AddTest("cdsetr", dataset_readAll, NULL, NULL, &test_params, sizeof(test_params), + "dataset collective read"); + + AddTest("eidsetw", extend_writeInd, NULL, NULL, &test_params, sizeof(test_params), + "extendible dataset independent write"); + AddTest("eidsetr", extend_readInd, NULL, NULL, &test_params, sizeof(test_params), + "extendible dataset independent read"); + AddTest("ecdsetw", extend_writeAll, NULL, NULL, &test_params, sizeof(test_params), + "extendible dataset collective write"); + AddTest("ecdsetr", extend_readAll, NULL, NULL, &test_params, sizeof(test_params), + "extendible dataset collective read"); + AddTest("eidsetw2", extend_writeInd2, NULL, NULL, &test_params, sizeof(test_params), + "extendible dataset independent write #2"); + AddTest("selnone", none_selection_chunk, NULL, NULL, &test_params, sizeof(test_params), + "chunked dataset with none-selection"); + AddTest("calloc", test_chunk_alloc, NULL, NULL, &test_params, sizeof(test_params), + "parallel extend Chunked allocation on serial file"); + AddTest("chkallocser2par", test_chunk_alloc_incr_ser_to_par, NULL, NULL, &test_params, + sizeof(test_params), "chunk allocation from serial to parallel file access"); + AddTest("fltread", test_filter_read, NULL, NULL, &test_params, sizeof(test_params), + "parallel read of dataset written serially with filters"); #ifdef H5_HAVE_FILTER_DEFLATE - AddTest("cmpdsetr", compress_readAll, NULL, "compressed dataset collective read", PARATESTFILE); + AddTest("cmpdsetr", compress_readAll, NULL, NULL, &test_params, sizeof(test_params), + "compressed dataset collective read"); #endif /* H5_HAVE_FILTER_DEFLATE */ - AddTest("zerodsetr", zero_dim_dset, NULL, "zero dim dset", PARATESTFILE); + AddTest("zerodsetr", zero_dim_dset, NULL, NULL, &test_params, sizeof(test_params), "zero dim dset"); - ndsets_params.name = PARATESTFILE; - ndsets_params.count = ndatasets; - AddTest("ndsetw", 
multiple_dset_write, NULL, "multiple datasets write", &ndsets_params); + test_params.count = ndatasets; + AddTest("ndsetw", multiple_dset_write, NULL, NULL, &test_params, sizeof(test_params), + "multiple datasets write"); - ngroups_params.name = PARATESTFILE; - ngroups_params.count = ngroups; - AddTest("ngrpw", multiple_group_write, NULL, "multiple groups write", &ngroups_params); - AddTest("ngrpr", multiple_group_read, NULL, "multiple groups read", &ngroups_params); + test_params.count = ngroups; + AddTest("ngrpw", multiple_group_write, NULL, NULL, &test_params, sizeof(test_params), + "multiple groups write"); + AddTest("ngrpr", multiple_group_read, NULL, NULL, &test_params, sizeof(test_params), + "multiple groups read"); - AddTest("compact", compact_dataset, NULL, "compact dataset test", PARATESTFILE); + AddTest("compact", compact_dataset, NULL, NULL, &test_params, sizeof(test_params), + "compact dataset test"); - collngroups_params.name = PARATESTFILE; - collngroups_params.count = ngroups; + test_params.count = ngroups; /* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */ - AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL, - "collective grp/dset write - independent grp/dset read", &collngroups_params); + AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL, NULL, &test_params, + sizeof(test_params), "collective grp/dset write - independent grp/dset read"); #ifndef H5_HAVE_WIN32_API - AddTest("bigdset", big_dataset, NULL, "big dataset test", PARATESTFILE); + AddTest("bigdset", big_dataset, NULL, NULL, &test_params, sizeof(test_params), "big dataset test"); #else printf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n"); #endif - AddTest("fill", dataset_fillvalue, NULL, "dataset fill value", PARATESTFILE); + AddTest("fill", dataset_fillvalue, NULL, NULL, &test_params, sizeof(test_params), "dataset fill value"); - AddTest("cchunk1", coll_chunk1, NULL, "simple collective chunk io", PARATESTFILE); - AddTest("cchunk2", coll_chunk2, NULL, "noncontiguous collective chunk io", PARATESTFILE); - AddTest("cchunk3", coll_chunk3, NULL, "multi-chunk collective chunk io", PARATESTFILE); - AddTest("cchunk4", coll_chunk4, NULL, "collective chunk io with partial non-selection ", PARATESTFILE); + AddTest("cchunk1", coll_chunk1, NULL, NULL, &test_params, sizeof(test_params), + "simple collective chunk io"); + AddTest("cchunk2", coll_chunk2, NULL, NULL, &test_params, sizeof(test_params), + "noncontiguous collective chunk io"); + AddTest("cchunk3", coll_chunk3, NULL, NULL, &test_params, sizeof(test_params), + "multi-chunk collective chunk io"); + AddTest("cchunk4", coll_chunk4, NULL, NULL, &test_params, sizeof(test_params), + "collective chunk io with partial non-selection"); if ((mpi_size < 3) && MAINPROCESS) { printf("Collective chunk IO optimization APIs "); printf("needs at least 3 processes to participate\n"); printf("Collective chunk IO API tests will be skipped \n"); } - AddTest((mpi_size < 3) ? "-cchunk5" : "cchunk5", coll_chunk5, NULL, - "linked chunk collective IO without optimization", PARATESTFILE); - AddTest((mpi_size < 3) ? "-cchunk6" : "cchunk6", coll_chunk6, NULL, - "multi-chunk collective IO with direct request", PARATESTFILE); - AddTest((mpi_size < 3) ? "-cchunk7" : "cchunk7", coll_chunk7, NULL, - "linked chunk collective IO with optimization", PARATESTFILE); - AddTest((mpi_size < 3) ? 
"-cchunk8" : "cchunk8", coll_chunk8, NULL, - "linked chunk collective IO transferring to multi-chunk", PARATESTFILE); - AddTest((mpi_size < 3) ? "-cchunk9" : "cchunk9", coll_chunk9, NULL, - "multiple chunk collective IO with optimization", PARATESTFILE); - AddTest((mpi_size < 3) ? "-cchunk10" : "cchunk10", coll_chunk10, NULL, - "multiple chunk collective IO transferring to independent IO", PARATESTFILE); + AddTest((mpi_size < 3) ? "-cchunk5" : "cchunk5", coll_chunk5, NULL, NULL, &test_params, + sizeof(test_params), "linked chunk collective IO without optimization"); + AddTest((mpi_size < 3) ? "-cchunk6" : "cchunk6", coll_chunk6, NULL, NULL, &test_params, + sizeof(test_params), "multi-chunk collective IO with direct request"); + AddTest((mpi_size < 3) ? "-cchunk7" : "cchunk7", coll_chunk7, NULL, NULL, &test_params, + sizeof(test_params), "linked chunk collective IO with optimization"); + AddTest((mpi_size < 3) ? "-cchunk8" : "cchunk8", coll_chunk8, NULL, NULL, &test_params, + sizeof(test_params), "linked chunk collective IO transferring to multi-chunk"); + AddTest((mpi_size < 3) ? "-cchunk9" : "cchunk9", coll_chunk9, NULL, NULL, &test_params, + sizeof(test_params), "multiple chunk collective IO with optimization"); + AddTest((mpi_size < 3) ? "-cchunk10" : "cchunk10", coll_chunk10, NULL, NULL, &test_params, + sizeof(test_params), "multiple chunk collective IO transferring to independent IO"); /* irregular collective IO tests*/ - AddTest("ccontw", coll_irregular_cont_write, NULL, "collective irregular contiguous write", PARATESTFILE); - AddTest("ccontr", coll_irregular_cont_read, NULL, "collective irregular contiguous read", PARATESTFILE); - AddTest("cschunkw", coll_irregular_simple_chunk_write, NULL, "collective irregular simple chunk write", - PARATESTFILE); - AddTest("cschunkr", coll_irregular_simple_chunk_read, NULL, "collective irregular simple chunk read", - PARATESTFILE); - AddTest("ccchunkw", coll_irregular_complex_chunk_write, NULL, "collective irregular complex chunk write", - PARATESTFILE); - AddTest("ccchunkr", coll_irregular_complex_chunk_read, NULL, "collective irregular complex chunk read", - PARATESTFILE); - - AddTest("null", null_dataset, NULL, "null dataset test", PARATESTFILE); - - io_mode_confusion_params.name = PARATESTFILE; - io_mode_confusion_params.count = 0; /* value not used */ - - AddTest("I/Omodeconf", io_mode_confusion, NULL, "I/O mode confusion test -- hangs quickly on failure", - &io_mode_confusion_params); + AddTest("ccontw", coll_irregular_cont_write, NULL, NULL, &test_params, sizeof(test_params), + "collective irregular contiguous write"); + AddTest("ccontr", coll_irregular_cont_read, NULL, NULL, &test_params, sizeof(test_params), + "collective irregular contiguous read"); + AddTest("cschunkw", coll_irregular_simple_chunk_write, NULL, NULL, &test_params, sizeof(test_params), + "collective irregular simple chunk write"); + AddTest("cschunkr", coll_irregular_simple_chunk_read, NULL, NULL, &test_params, sizeof(test_params), + "collective irregular simple chunk read"); + AddTest("ccchunkw", coll_irregular_complex_chunk_write, NULL, NULL, &test_params, sizeof(test_params), + "collective irregular complex chunk write"); + AddTest("ccchunkr", coll_irregular_complex_chunk_read, NULL, NULL, &test_params, sizeof(test_params), + "collective irregular complex chunk read"); + + AddTest("null", null_dataset, NULL, NULL, &test_params, sizeof(test_params), "null dataset test"); + + test_params.count = 0; + AddTest("I/Omodeconf", io_mode_confusion, NULL, NULL, &test_params, 
sizeof(test_params), + "I/O mode confusion test -- hangs quickly on failure"); if ((mpi_size < 3) && MAINPROCESS) { printf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n"); printf("rr_obj_hdr_flush_confusion test will be skipped \n"); } if (mpi_size > 2) { - rr_obj_flush_confusion_params.name = PARATESTFILE; - rr_obj_flush_confusion_params.count = 0; /* value not used */ - AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL, - "round robin object header flush confusion test", &rr_obj_flush_confusion_params); + test_params.count = 0; + AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL, NULL, &test_params, sizeof(test_params), + "round robin object header flush confusion test"); } - AddTest("alnbg1", chunk_align_bug_1, NULL, "Chunk allocation with alignment bug.", PARATESTFILE); + AddTest("alnbg1", chunk_align_bug_1, NULL, NULL, &test_params, sizeof(test_params), + "Chunk allocation with alignment bug."); - AddTest("tldsc", lower_dim_size_comp_test, NULL, - "test lower dim size comp in span tree to mpi derived type", PARATESTFILE); + AddTest("tldsc", lower_dim_size_comp_test, NULL, NULL, &test_params, sizeof(test_params), + "test lower dim size comp in span tree to mpi derived type"); - AddTest("lccio", link_chunk_collective_io_test, NULL, "test mpi derived type management", PARATESTFILE); + AddTest("lccio", link_chunk_collective_io_test, NULL, NULL, &test_params, sizeof(test_params), + "test mpi derived type management"); - AddTest("actualio", actual_io_mode_tests, NULL, "test actual io mode proprerty", PARATESTFILE); + AddTest("actualio", actual_io_mode_tests, NULL, NULL, &test_params, sizeof(test_params), + "test actual io mode proprerty"); - AddTest("nocolcause", no_collective_cause_tests, NULL, "test cause for broken collective io", - PARATESTFILE); + AddTest("nocolcause", no_collective_cause_tests, NULL, NULL, &test_params, sizeof(test_params), + "test cause for broken collective io"); - AddTest("edpl", test_plist_ed, NULL, "encode/decode Property Lists", NULL); + AddTest("edpl", test_plist_ed, NULL, NULL, NULL, 0, "encode/decode Property Lists"); - AddTest("extlink", external_links, NULL, "test external links", NULL); + AddTest("extlink", external_links, NULL, NULL, NULL, 0, "test external links"); if ((mpi_size < 2) && MAINPROCESS) { printf("File Image Ops daisy chain test needs at least 2 processes.\n"); printf("File Image Ops daisy chain test will be skipped \n"); } - AddTest((mpi_size < 2) ? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL, - "file image ops daisy chain", NULL); + AddTest((mpi_size < 2) ? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL, NULL, NULL, 0, + "file image ops daisy chain"); /* Atomicity operations are not supported for OpenMPI versions < major * version 5 and will sporadically fail. 
@@ -557,31 +529,38 @@ main(int argc, char **argv) printf("Atomicity tests will not work with a non MPIO VFD\n"); } else if (mpi_size >= 2 && facc_type == FACC_MPIO) { - AddTest("atomicity", dataset_atomicity, NULL, "dataset atomic updates", PARATESTFILE); + AddTest("atomicity", dataset_atomicity, NULL, NULL, &test_params, sizeof(test_params), + "dataset atomic updates"); } #endif - AddTest("denseattr", test_dense_attr, NULL, "Store Dense Attributes", PARATESTFILE); + AddTest("denseattr", test_dense_attr, NULL, NULL, &test_params, sizeof(test_params), + "Store Dense Attributes"); - AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL, - "Collective Metadata read with some ranks having no selection", PARATESTFILE); - AddTest("MC_coll_MD_read", test_multi_chunk_io_addrmap_issue, NULL, - "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE); - AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL, - "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE); - AddTest("GH_coll_MD_wr", test_collective_global_heap_write, NULL, - "Collective MD write of global heap data", PARATESTFILE); - AddTest("COLLIO_INDMDWR", test_coll_io_ind_md_write, NULL, - "Collective I/O with Independent metadata writes", PARATESTFILE); + AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL, NULL, &test_params, + sizeof(test_params), "Collective Metadata read with some ranks having no selection"); + AddTest("MC_coll_MD_read", test_multi_chunk_io_addrmap_issue, NULL, NULL, &test_params, + sizeof(test_params), "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)"); + AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL, NULL, &test_params, + sizeof(test_params), "Collective MD read with link chunk I/O (H5D__sort_chunk)"); + AddTest("GH_coll_MD_wr", test_collective_global_heap_write, NULL, NULL, &test_params, sizeof(test_params), + "Collective MD write of global heap data"); + AddTest("COLLIO_INDMDWR", test_coll_io_ind_md_write, NULL, NULL, &test_params, sizeof(test_params), + "Collective I/O with Independent metadata writes"); /* Display testing information */ - TestInfo(argv[0]); + TestInfo(stdout); /* setup file access property list */ H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* Parse command line arguments */ - TestParseCmdLine(argc, argv); + if (TestParseCmdLine(argc, argv) < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't parse command-line arguments\n"); + TestShutdown(); + MPI_Abort(MPI_COMM_WORLD, -1); + } if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) { printf("===================================\n" @@ -590,7 +569,12 @@ main(int argc, char **argv) } /* Perform requested testing */ - PerformTests(); + if (PerformTests() < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't run tests\n"); + TestShutdown(); + MPI_Abort(MPI_COMM_WORLD, -1); + } /* make sure all processes are finished before final report, cleanup * and exit. 
@@ -599,7 +583,7 @@ main(int argc, char **argv) /* Display test summary, if requested */ if (MAINPROCESS && GetTestSummary()) - TestSummary(); + TestSummary(stdout); /* Clean up test files */ h5_delete_all_test_files(FILENAME, fapl); @@ -632,7 +616,11 @@ main(int argc, char **argv) H5close(); /* Release test infrastructure */ - TestShutdown(); + if (TestShutdown() < 0) { + if (MAINPROCESS) + fprintf(stderr, "couldn't shut down testing framework\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ MPI_Finalize(); diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 31b7c6963d5..345045e4f28 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -15,17 +15,10 @@ #ifndef PHDF5TEST_H #define PHDF5TEST_H -#include "testpar.h" +/* Include testing framework functionality */ +#include "testframe.h" -enum H5TEST_COLL_CHUNK_API { - API_NONE = 0, - API_LINK_HARD, - API_MULTI_HARD, - API_LINK_TRUE, - API_LINK_FALSE, - API_MULTI_COLL, - API_MULTI_IND -}; +#include "testpar.h" #ifndef false #define false 0 @@ -51,23 +44,6 @@ enum H5TEST_COLL_CHUNK_API { #define DATASETNAME8 "Data8" #define DATASETNAME9 "Data9" -/* point selection order */ -#define IN_ORDER 1 -#define OUT_OF_ORDER 2 - -/* Hyperslab layout styles */ -#define BYROW 1 /* divide into slabs of rows */ -#define BYCOL 2 /* divide into blocks of columns */ -#define ZROW 3 /* same as BYCOL except process 0 gets 0 rows */ -#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */ - -/* File_Access_type bits */ -#define FACC_DEFAULT 0x0 /* default */ -#define FACC_MPIO 0x1 /* MPIO */ -#define FACC_SPLIT 0x2 /* Split File */ - -#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/ -#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */ /*Constants for collective chunk definitions */ #define SPACE_DIM1 24 #define SPACE_DIM2 4 @@ -211,14 +187,6 @@ typedef struct H5Ptest_param_t /* holds extra test parameters */ /* Dataset data type. Int's can be easily octo dumped. 
*/ typedef int DATATYPE; -/* Shape Same Tests Definitions */ -typedef enum { - IND_CONTIG, /* Independent IO on contiguous datasets */ - COL_CONTIG, /* Collective IO on contiguous datasets */ - IND_CHUNKED, /* Independent IO on chunked datasets */ - COL_CHUNKED /* Collective IO on chunked datasets */ -} ShapeSameTestMethods; - /* Shared global variables */ extern int dim0, dim1; /*Dataset dimensions */ extern int chunkdim0, chunkdim1; /*Chunk dimensions */ @@ -227,86 +195,81 @@ extern int facc_type; /*Test file access type */ extern int dxfer_coll_type; /* Test program prototypes */ -void test_plist_ed(void); -void external_links(void); -void zero_dim_dset(void); -void test_file_properties(void); -void test_delete(void); -void test_invalid_libver_bounds_file_close_assert(void); -void test_evict_on_close_parallel_unsupp(void); -void test_fapl_preserve_hints(void); -void multiple_dset_write(void); -void multiple_group_write(void); -void multiple_group_read(void); -void collective_group_write_independent_group_read(void); -void collective_group_write(void); -void independent_group_read(void); -void test_fapl_mpio_dup(void); -void test_get_dxpl_mpio(void); -void test_split_comm_access(void); -void test_page_buffer_access(void); -void dataset_atomicity(void); -void dataset_writeInd(void); -void dataset_writeAll(void); -void extend_writeInd(void); -void extend_writeInd2(void); -void extend_writeAll(void); -void dataset_readInd(void); -void dataset_readAll(void); -void extend_readInd(void); -void extend_readAll(void); -void none_selection_chunk(void); -void actual_io_mode_tests(void); -void no_collective_cause_tests(void); -void test_chunk_alloc(void); -void test_chunk_alloc_incr_ser_to_par(void); -void test_filter_read(void); -void compact_dataset(void); -void null_dataset(void); -void big_dataset(void); -void dataset_fillvalue(void); -void coll_chunk1(void); -void coll_chunk2(void); -void coll_chunk3(void); -void coll_chunk4(void); -void coll_chunk5(void); -void coll_chunk6(void); -void coll_chunk7(void); -void coll_chunk8(void); -void coll_chunk9(void); -void coll_chunk10(void); -void coll_irregular_cont_read(void); -void coll_irregular_cont_write(void); -void coll_irregular_simple_chunk_read(void); -void coll_irregular_simple_chunk_write(void); -void coll_irregular_complex_chunk_read(void); -void coll_irregular_complex_chunk_write(void); -void io_mode_confusion(void); -void rr_obj_hdr_flush_confusion(void); -void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm); -void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm); -void chunk_align_bug_1(void); -void lower_dim_size_comp_test(void); -void link_chunk_collective_io_test(void); +void test_plist_ed(const void *params); +void external_links(const void *params); +void zero_dim_dset(const void *params); +void test_file_properties(const void *params); +void test_delete(const void *params); +void test_invalid_libver_bounds_file_close_assert(const void *params); +void test_evict_on_close_parallel_unsupp(const void *params); +void test_fapl_preserve_hints(const void *params); +void multiple_dset_write(const void *params); +void multiple_group_write(const void *params); +void multiple_group_read(const void *params); +void collective_group_write_independent_group_read(const void *params); +void collective_group_write(const void *params); +void independent_group_read(const void *params); +void test_fapl_mpio_dup(const void *params); +void test_get_dxpl_mpio(const void *params); +void test_split_comm_access(const void *params); +void 
test_page_buffer_access(const void *params); +void dataset_atomicity(const void *params); +void dataset_writeInd(const void *params); +void dataset_writeAll(const void *params); +void extend_writeInd(const void *params); +void extend_writeInd2(const void *params); +void extend_writeAll(const void *params); +void dataset_readInd(const void *params); +void dataset_readAll(const void *params); +void extend_readInd(const void *params); +void extend_readAll(const void *params); +void none_selection_chunk(const void *params); +void actual_io_mode_tests(const void *params); +void no_collective_cause_tests(const void *params); +void test_chunk_alloc(const void *params); +void test_chunk_alloc_incr_ser_to_par(const void *params); +void test_filter_read(const void *params); +void compact_dataset(const void *params); +void null_dataset(const void *params); +void big_dataset(const void *params); +void dataset_fillvalue(const void *params); +void coll_chunk1(const void *params); +void coll_chunk2(const void *params); +void coll_chunk3(const void *params); +void coll_chunk4(const void *params); +void coll_chunk5(const void *params); +void coll_chunk6(const void *params); +void coll_chunk7(const void *params); +void coll_chunk8(const void *params); +void coll_chunk9(const void *params); +void coll_chunk10(const void *params); +void coll_irregular_cont_read(const void *params); +void coll_irregular_cont_write(const void *params); +void coll_irregular_simple_chunk_read(const void *params); +void coll_irregular_simple_chunk_write(const void *params); +void coll_irregular_complex_chunk_read(const void *params); +void coll_irregular_complex_chunk_write(const void *params); +void io_mode_confusion(const void *params); +void rr_obj_hdr_flush_confusion(const void *params); +void chunk_align_bug_1(const void *params); +void lower_dim_size_comp_test(const void *params); +void link_chunk_collective_io_test(const void *params); void contig_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type); void checker_board_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type); -void file_image_daisy_chain_test(void); +void file_image_daisy_chain_test(const void *params); #ifdef H5_HAVE_FILTER_DEFLATE -void compress_readAll(void); +void compress_readAll(const void *params); #endif /* H5_HAVE_FILTER_DEFLATE */ -void test_dense_attr(void); -void test_partial_no_selection_coll_md_read(void); -void test_multi_chunk_io_addrmap_issue(void); -void test_link_chunk_io_sort_chunk_issue(void); -void test_collective_global_heap_write(void); -void test_coll_io_ind_md_write(void); -void test_oflush(void); +void test_dense_attr(const void *params); +void test_partial_no_selection_coll_md_read(const void *params); +void test_multi_chunk_io_addrmap_issue(const void *params); +void test_link_chunk_io_sort_chunk_issue(const void *params); +void test_collective_global_heap_write(const void *params); +void test_coll_io_ind_md_write(const void *params); +void test_oflush(const void *params); /* commonly used prototypes */ -hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type); MPI_Offset h5_mpi_get_file_size(const char *filename, MPI_Comm comm, MPI_Info info); -int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, - DATATYPE *original); -void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, - hsize_t coords[], int order); +int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, + 
DATATYPE *original); #endif /* PHDF5TEST_H */ diff --git a/tools/lib/h5tools.c b/tools/lib/h5tools.c index 29419e297af..83037f1de62 100644 --- a/tools/lib/h5tools.c +++ b/tools/lib/h5tools.c @@ -671,12 +671,8 @@ h5tools_set_fapl_vol(hid_t fapl_id, h5tools_vol_info_t *vol_info) /* Check for VOL connectors that ship with the library, then try * registering by name if that fails. */ - if (!strcmp(vol_info->u.name, H5VL_NATIVE_NAME)) { - connector_id = H5VL_NATIVE; - } - else if (!strcmp(vol_info->u.name, H5VL_PASSTHRU_NAME)) { + if (!strcmp(vol_info->u.name, H5VL_PASSTHRU_NAME)) connector_id = H5VL_PASSTHRU; - } else { /* NOTE: Not being able to pass in a VIPL may be a limitation for some * connectors. @@ -698,12 +694,8 @@ h5tools_set_fapl_vol(hid_t fapl_id, h5tools_vol_info_t *vol_info) } else { /* Check for VOL connectors that ship with the library */ - if (vol_info->u.value == H5VL_NATIVE_VALUE) { - connector_id = H5VL_NATIVE; - } - else if (vol_info->u.value == H5VL_PASSTHRU_VALUE) { + if (vol_info->u.value == H5VL_PASSTHRU_VALUE) connector_id = H5VL_PASSTHRU; - } else { /* NOTE: Not being able to pass in a VIPL may be a limitation for some * connectors. diff --git a/tools/lib/h5trav.c b/tools/lib/h5trav.c index 017e0625521..4376184e43d 100644 --- a/tools/lib/h5trav.c +++ b/tools/lib/h5trav.c @@ -701,7 +701,7 @@ trav_table_addlink(trav_table_t *table, const H5O_token_t *obj_token, const char if (table->objs[i].nlinks == (unsigned)table->objs[i].sizelinks) { table->objs[i].sizelinks = MAX(1, table->objs[i].sizelinks * 2); table->objs[i].links = (trav_link_t *)realloc( - table->objs[i].links, table->objs[i].sizelinks * sizeof(trav_link_t)); + table->objs[i].links, table->objs[i].sizelinks * sizeof(trav_link_t)); } /* end if */ /* insert it */ diff --git a/tools/libtest/h5tools_test_utils.c b/tools/libtest/h5tools_test_utils.c index 03c595f66cb..bfe62506b25 100644 --- a/tools/libtest/h5tools_test_utils.c +++ b/tools/libtest/h5tools_test_utils.c @@ -674,8 +674,8 @@ test_populate_ros3_fa(void) { H5FD_ros3_fapl_ext_t fa = {{bad_version, false, "a", "b", "c"}, "d"}; const char *values[] = {"somewhere over the rainbow not too high " - "there is another rainbow bounding some darkened sky", - "y", "z", ""}; + "there is another rainbow bounding some darkened sky", + "y", "z", ""}; if (show_progress) { printf("region overflow\n"); @@ -738,17 +738,17 @@ test_populate_ros3_fa(void) { H5FD_ros3_fapl_ext_t fa = {{bad_version, false, "a", "b", "c"}, "d"}; const char *values[] = {"x", - "Why is it necessary to solve the problem? " - "What benefits will you receive by solving the problem? " - "What is the unknown? " - "What is it you don't yet understand? " - "What is the information you have? " - "What isn't the problem? " - "Is the information insufficient, redundant, or contradictory? " - "Should you draw a diagram or figure of the problem? " - "What are the boundaries of the problem? " - "Can you separate the various parts of the problem?", - "z", ""}; + "Why is it necessary to solve the problem? " + "What benefits will you receive by solving the problem? " + "What is the unknown? " + "What is it you don't yet understand? " + "What is the information you have? " + "What isn't the problem? " + "Is the information insufficient, redundant, or contradictory? " + "Should you draw a diagram or figure of the problem? " + "What are the boundaries of the problem? 
" + "Can you separate the various parts of the problem?", + "z", ""}; if (show_progress) { printf("id overflow\n"); @@ -871,17 +871,17 @@ test_populate_ros3_fa(void) { H5FD_ros3_fapl_ext_t fa = {{bad_version, false, "a", "b", "c"}, "d"}; const char *values[] = {"x", "y", - "Why is it necessary to solve the problem? " - "What benefits will you receive by solving the problem? " - "What is the unknown? " - "What is it you don't yet understand? " - "What is the information you have? " - "What isn't the problem? " - "Is the information insufficient, redundant, or contradictory? " - "Should you draw a diagram or figure of the problem? " - "What are the boundaries of the problem? " - "Can you separate the various parts of the problem?", - ""}; + "Why is it necessary to solve the problem? " + "What benefits will you receive by solving the problem? " + "What is the unknown? " + "What is it you don't yet understand? " + "What is the information you have? " + "What isn't the problem? " + "Is the information insufficient, redundant, or contradictory? " + "Should you draw a diagram or figure of the problem? " + "What are the boundaries of the problem? " + "Can you separate the various parts of the problem?", + ""}; if (show_progress) { printf("key overflow\n"); @@ -903,7 +903,7 @@ test_populate_ros3_fa(void) { H5FD_ros3_fapl_ext_t fa = {{0, 0, "", "", ""}, ""}; const char *values[] = {"us-east-2", "AKIAIMC3D3XLYXLN5COA", - "ugs5aVVnLFCErO/8uW14iWE3K5AgXMpsMlWneO/+", ""}; + "ugs5aVVnLFCErO/8uW14iWE3K5AgXMpsMlWneO/+", ""}; JSVERIFY(1, h5tools_populate_ros3_fapl(&fa, values), "unable to set use case") JSVERIFY(1, fa.fa.version, "version check") JSVERIFY(1, fa.fa.authenticate, "should authenticate") diff --git a/tools/src/h5copy/h5copy.c b/tools/src/h5copy/h5copy.c index ef7e36f6532..a4d4027121d 100644 --- a/tools/src/h5copy/h5copy.c +++ b/tools/src/h5copy/h5copy.c @@ -20,16 +20,16 @@ /* command-line options: short and long-named parameters */ static const char *s_opts = "d:f:hi:o:ps:vVE*"; static struct h5_long_options l_opts[] = {{"destination", require_arg, 'd'}, - {"flag", require_arg, 'f'}, - {"help", no_arg, 'h'}, - {"input", require_arg, 'i'}, - {"output", require_arg, 'o'}, - {"parents", no_arg, 'p'}, - {"source", require_arg, 's'}, - {"verbose", no_arg, 'v'}, - {"version", no_arg, 'V'}, - {"enable-error-stack", optional_arg, 'E'}, - {NULL, 0, '\0'}}; + {"flag", require_arg, 'f'}, + {"help", no_arg, 'h'}, + {"input", require_arg, 'i'}, + {"output", require_arg, 'o'}, + {"parents", no_arg, 'p'}, + {"source", require_arg, 's'}, + {"verbose", no_arg, 'v'}, + {"version", no_arg, 'V'}, + {"enable-error-stack", optional_arg, 'E'}, + {NULL, 0, '\0'}}; static char *fname_src = NULL; static char *fname_dst = NULL; static char *oname_src = NULL; diff --git a/tools/src/h5dump/h5dump_extern.h b/tools/src/h5dump/h5dump_extern.h index 723a39edc4c..ca4a92906bf 100644 --- a/tools/src/h5dump/h5dump_extern.h +++ b/tools/src/h5dump/h5dump_extern.h @@ -44,7 +44,7 @@ typedef struct h5dump_table_list_t { table_t *group_table; /* Table of groups */ table_t *dset_table; /* Table of datasets */ table_t *type_table; /* Table of datatypes */ - } * tables; + } *tables; } h5dump_table_list_t; extern h5dump_table_list_t table_list; diff --git a/tools/src/h5repack/h5repack.c b/tools/src/h5repack/h5repack.c index fa1dc90081b..1cbed04df19 100644 --- a/tools/src/h5repack/h5repack.c +++ b/tools/src/h5repack/h5repack.c @@ -752,7 +752,7 @@ check_options(pack_opt_t *options) if (options->ublock_filename == NULL && options->ublock_size 
!= 0) { if (options->verbose > 0) { - printf("Warning: user block file name missing. Reserving a size of %ld...\n", + printf("Warning: user block file name missing. Reserving a size of %" PRIuHSIZE "...\n", options->ublock_size); } } diff --git a/tools/src/h5repack/h5repack.h b/tools/src/h5repack/h5repack.h index 2977ffab6db..cdfeb86b5d4 100644 --- a/tools/src/h5repack/h5repack.h +++ b/tools/src/h5repack/h5repack.h @@ -119,7 +119,8 @@ * \li 3 This is #H5F_LIBVER_V112 in #H5F_libver_t struct * \li 4 This is #H5F_LIBVER_V114 in #H5F_libver_t struct * \li 5 This is #H5F_LIBVER_V116 in #H5F_libver_t struct - * \li #H5F_LIBVER_LATEST is aliased to #H5F_LIBVER_V116 for this release + * \li 6 This is #H5F_LIBVER_V118 in #H5F_libver_t struct + * \li #H5F_LIBVER_LATEST is aliased to #H5F_LIBVER_V118 for this release * * \subsubsection subsubsec_cltools_h5repack_options_fs File Strategy Settings * FS_STRATEGY is a string indicating the file space strategy used: diff --git a/tools/src/h5repack/h5repack_main.c b/tools/src/h5repack/h5repack_main.c index 9fb85f13cdd..baf60535dc9 100644 --- a/tools/src/h5repack/h5repack_main.c +++ b/tools/src/h5repack/h5repack_main.c @@ -229,8 +229,9 @@ usage(const char *prog) PRINTVALSTREAM(rawoutstream, " 3: This is H5F_LIBVER_V112 in H5F_libver_t struct\n"); PRINTVALSTREAM(rawoutstream, " 4: This is H5F_LIBVER_V114 in H5F_libver_t struct\n"); PRINTVALSTREAM(rawoutstream, " 5: This is H5F_LIBVER_V116 in H5F_libver_t struct\n"); + PRINTVALSTREAM(rawoutstream, " 6: This is H5F_LIBVER_V118 in H5F_libver_t struct\n"); PRINTVALSTREAM(rawoutstream, - " (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V116 for this release\n"); + " (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V118 for this release\n"); PRINTVALSTREAM(rawoutstream, "\n"); PRINTVALSTREAM(rawoutstream, " FS_STRATEGY is a string indicating the file space strategy used:\n"); PRINTVALSTREAM(rawoutstream, " FSM_AGGR:\n"); diff --git a/tools/test/h5diff/h5diffgentest.c b/tools/test/h5diff/h5diffgentest.c index e595dd34c0d..351e4153571 100644 --- a/tools/test/h5diff/h5diffgentest.c +++ b/tools/test/h5diff/h5diffgentest.c @@ -5697,13 +5697,13 @@ test_objs_strings(const char *fname1, const char *fname2) hsize_t dims2[] = {20}; char string2A[20][10] = {"ab cd ef1", "ab cd ef2", "ab cd ef3", "ab cd ef4", "ab cd ef5", - "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd 9", "ab cd 0", - "ab cd 1", "ab cd 2", "ab cd ef3", "ab cd ef4", "ab cd ef5", - "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0"}; + "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd 9", "ab cd 0", + "ab cd 1", "ab cd 2", "ab cd ef3", "ab cd ef4", "ab cd ef5", + "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0"}; char string2B[20][10] = {"ab cd ef1", "ab cd ef2", "ab cd ef3", "ab cd ef4", "ab cd ef5", - "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0", - "ab cd ef1", "ab cd ef2", "ab cd 3", "ab cd 4", "ab cd 5", - "ab cd 6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0"}; + "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0", + "ab cd ef1", "ab cd ef2", "ab cd 3", "ab cd 4", "ab cd 5", + "ab cd 6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0"}; hsize_t dims3[] = {27}; char string3A[27][6] = {"abcd0", "abcd1", "abcd2", "abcd3", "abcd4", "abcd5", "abcd6", "abcd7", "abcd8", @@ -5885,9 +5885,9 @@ write_attr_strings(hid_t loc_id, const char *dset_name, hid_t fid, /* create 3D attributes with dimension [4][3][2], 24 elements */ hsize_t dims3[3] = {4, 3, 2}; char buf13[4][3][2][STR_SIZE] = {{{"ab", "cd"}, {"ef", 
"gh"}, {"ij", "kl"}}, - {{"mn", "pq"}, {"rs", "tu"}, {"vw", "xz"}}, - {{"AB", "CD"}, {"EF", "GH"}, {"IJ", "KL"}}, - {{"MN", "PQ"}, {"RS", "TU"}, {"VW", "XZ"}}}; /* string */ + {{"mn", "pq"}, {"rs", "tu"}, {"vw", "xz"}}, + {{"AB", "CD"}, {"EF", "GH"}, {"IJ", "KL"}}, + {{"MN", "PQ"}, {"RS", "TU"}, {"VW", "XZ"}}}; /* string */ char *buf13a[4][3][2]; /* VL string */ char buf23[4][3][2]; /* bitfield, opaque */ s_t buf33[4][3][2]; /* compound */ @@ -6874,9 +6874,9 @@ write_attr_in(hid_t loc_id, const char *dset_name, hid_t fid, /* create 3D attributes with dimension [4][3][2], 24 elements */ hsize_t dims3[3] = {4, 3, 2}; char buf13[4][3][2][STR_SIZE] = {{{"ab", "cd"}, {"ef", "gh"}, {"ij", "kl"}}, - {{"mn", "pq"}, {"rs", "tu"}, {"vw", "xz"}}, - {{"AB", "CD"}, {"EF", "GH"}, {"IJ", "KL"}}, - {{"MN", "PQ"}, {"RS", "TU"}, {"VW", "XZ"}}}; /* string */ + {{"mn", "pq"}, {"rs", "tu"}, {"vw", "xz"}}, + {{"AB", "CD"}, {"EF", "GH"}, {"IJ", "KL"}}, + {{"MN", "PQ"}, {"RS", "TU"}, {"VW", "XZ"}}}; /* string */ char *buf13a[4][3][2]; /* VL string */ char buf23[4][3][2]; /* bitfield, opaque */ s_t buf33[4][3][2]; /* compound */ @@ -7865,9 +7865,9 @@ write_dset_in(hid_t loc_id, const char *dset_name, hid_t fid, /* create 3D attributes with dimension [4][3][2], 24 elements */ hsize_t dims3[3] = {4, 3, 2}; char buf13[4][3][2][STR_SIZE] = {{{"ab", "cd"}, {"ef", "gh"}, {"ij", "kl"}}, - {{"mn", "pq"}, {"rs", "tu"}, {"vw", "xz"}}, - {{"AB", "CD"}, {"EF", "GH"}, {"IJ", "KL"}}, - {{"MN", "PQ"}, {"RS", "TU"}, {"VW", "XZ"}}}; /* string */ + {{"mn", "pq"}, {"rs", "tu"}, {"vw", "xz"}}, + {{"AB", "CD"}, {"EF", "GH"}, {"IJ", "KL"}}, + {{"MN", "PQ"}, {"RS", "TU"}, {"VW", "XZ"}}}; /* string */ char *buf13a[4][3][2]; /* VL string */ char buf23[4][3][2]; /* bitfield, opaque */ s_t buf33[4][3][2]; /* compound */ diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c index 55014aeeecf..1af7d721254 100644 --- a/tools/test/h5dump/h5dumpgentest.c +++ b/tools/test/h5dump/h5dumpgentest.c @@ -1797,14 +1797,14 @@ gent_str(void) hsize_t dims2[] = {20}; char string2[20][10] = {"ab cd ef1", "ab cd ef2", "ab cd ef3", "ab cd ef4", "ab cd ef5", - "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0", - "ab cd ef1", "ab cd ef2", "ab cd ef3", "ab cd ef4", "ab cd ef5", - "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0"}; + "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0", + "ab cd ef1", "ab cd ef2", "ab cd ef3", "ab cd ef4", "ab cd ef5", + "ab cd ef6", "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0"}; hsize_t dims3[] = {27}; char string3[27][6] = {"abcd0", "abcd1", "abcd2", "abcd3", "abcd4", "abcd5", "abcd6", "abcd7", "abcd8", - "abcd9", "abcd0", "abcd1", "abcd2", "abcd3", "abcd4", "abcd5", "abcd6", "abcd7", - "abcd8", "abcd9", "abcd0", "abcd1", "abcd2", "abcd3", "abcd4", "abcd5", "abcd6"}; + "abcd9", "abcd0", "abcd1", "abcd2", "abcd3", "abcd4", "abcd5", "abcd6", "abcd7", + "abcd8", "abcd9", "abcd0", "abcd1", "abcd2", "abcd3", "abcd4", "abcd5", "abcd6"}; int i, j, k, l; @@ -2097,7 +2097,7 @@ gent_enum(void) hid_t file, type, space, dset; int val; enumtype data[] = {RED, GREEN, BLUE, GREEN, WHITE, WHITE, BLACK, GREEN, BLUE, RED, - RED, BLUE, GREEN, BLACK, WHITE, RED, WHITE, GREEN, GREEN, BLUE}; + RED, BLUE, GREEN, BLACK, WHITE, RED, WHITE, GREEN, GREEN, BLUE}; hsize_t size[1] = {NELMTS(data)}; file = H5Fcreate(FILE15, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); @@ -7263,31 +7263,31 @@ gent_packedbits(void) struct { uint8_t arr[F66_XDIM][F66_YDIM8]; - } * dsetu8; + } *dsetu8; struct 
{ uint16_t arr[F66_XDIM][F66_YDIM16]; - } * dsetu16; + } *dsetu16; struct { uint32_t arr[F66_XDIM][F66_YDIM32]; - } * dsetu32; + } *dsetu32; struct { uint64_t arr[F66_XDIM][F66_YDIM64]; - } * dsetu64; + } *dsetu64; struct { int8_t arr[F66_XDIM][F66_YDIM8]; - } * dset8; + } *dset8; struct { int16_t arr[F66_XDIM][F66_YDIM16]; - } * dset16; + } *dset16; struct { int32_t arr[F66_XDIM][F66_YDIM32]; - } * dset32; + } *dset32; struct { int64_t arr[F66_XDIM][F66_YDIM64]; - } * dset64; + } *dset64; struct { double arr[F66_XDIM][F66_YDIM8]; - } * dsetdbl; + } *dsetdbl; uint8_t valu8bits; uint16_t valu16bits; @@ -7506,31 +7506,31 @@ gent_attr_intsize(void) struct { uint8_t arr[F66_XDIM][F66_YDIM8]; - } * dsetu8; + } *dsetu8; struct { uint16_t arr[F66_XDIM][F66_YDIM16]; - } * dsetu16; + } *dsetu16; struct { uint32_t arr[F66_XDIM][F66_YDIM32]; - } * dsetu32; + } *dsetu32; struct { uint64_t arr[F66_XDIM][F66_YDIM64]; - } * dsetu64; + } *dsetu64; struct { int8_t arr[F66_XDIM][F66_YDIM8]; - } * dset8; + } *dset8; struct { int16_t arr[F66_XDIM][F66_YDIM16]; - } * dset16; + } *dset16; struct { int32_t arr[F66_XDIM][F66_YDIM64]; - } * dset32; + } *dset32; struct { int64_t arr[F66_XDIM][F66_YDIM64]; - } * dset64; + } *dset64; struct { double arr[F66_XDIM][F66_YDIM8]; - } * dsetdbl; + } *dsetdbl; uint8_t valu8bits; uint16_t valu16bits; @@ -8621,31 +8621,31 @@ gent_intscalars(void) struct { uint8_t arr[F73_XDIM][F73_YDIM8]; - } * dsetu8; + } *dsetu8; struct { uint16_t arr[F73_XDIM][F73_YDIM16]; - } * dsetu16; + } *dsetu16; struct { uint32_t arr[F73_XDIM][F73_YDIM32]; - } * dsetu32; + } *dsetu32; struct { uint64_t arr[F73_XDIM][F73_YDIM64]; - } * dsetu64; + } *dsetu64; struct { int8_t arr[F73_XDIM][F73_YDIM8]; - } * dset8; + } *dset8; struct { int16_t arr[F73_XDIM][F73_YDIM16]; - } * dset16; + } *dset16; struct { int32_t arr[F73_XDIM][F73_YDIM32]; - } * dset32; + } *dset32; struct { int64_t arr[F73_XDIM][F73_YDIM64]; - } * dset64; + } *dset64; struct { double arr[F73_XDIM][F73_YDIM8]; - } * dsetdbl; + } *dsetdbl; uint8_t valu8bits; uint16_t valu16bits; @@ -8882,31 +8882,31 @@ gent_attr_intscalars(void) struct { uint8_t arr[F73_XDIM][F73_YDIM8]; - } * dsetu8; + } *dsetu8; struct { uint16_t arr[F73_XDIM][F73_YDIM16]; - } * dsetu16; + } *dsetu16; struct { uint32_t arr[F73_XDIM][F73_YDIM32]; - } * dsetu32; + } *dsetu32; struct { uint64_t arr[F73_XDIM][F73_YDIM64]; - } * dsetu64; + } *dsetu64; struct { int8_t arr[F73_XDIM][F73_YDIM8]; - } * dset8; + } *dset8; struct { int16_t arr[F73_XDIM][F73_YDIM16]; - } * dset16; + } *dset16; struct { int32_t arr[F73_XDIM][F73_YDIM32]; - } * dset32; + } *dset32; struct { int64_t arr[F73_XDIM][F73_YDIM64]; - } * dset64; + } *dset64; struct { double arr[F73_XDIM][F73_YDIM8]; - } * dsetdbl; + } *dsetdbl; uint8_t valu8bits; uint16_t valu16bits; @@ -9985,31 +9985,31 @@ gent_intsattrs(void) struct { uint8_t arr[F66_XDIM][F66_YDIM8]; - } * dsetu8; + } *dsetu8; struct { uint16_t arr[F66_XDIM][F66_YDIM16]; - } * dsetu16; + } *dsetu16; struct { uint32_t arr[F66_XDIM][F66_YDIM32]; - } * dsetu32; + } *dsetu32; struct { uint64_t arr[F66_XDIM][F66_YDIM64]; - } * dsetu64; + } *dsetu64; struct { int8_t arr[F66_XDIM][F66_YDIM8]; - } * dset8; + } *dset8; struct { int16_t arr[F66_XDIM][F66_YDIM16]; - } * dset16; + } *dset16; struct { int32_t arr[F66_XDIM][F66_YDIM32]; - } * dset32; + } *dset32; struct { int64_t arr[F66_XDIM][F66_YDIM64]; - } * dset64; + } *dset64; struct { double arr[F66_XDIM][F66_YDIM8]; - } * dsetdbl; + } *dsetdbl; uint8_t *asetu8 = NULL; uint16_t *asetu16 = NULL; @@ -10345,13 
+10345,13 @@ gent_floatsattrs(void) struct { float arr[F89_XDIM][F89_YDIM32]; - } * dset32; + } *dset32; struct { double arr[F89_XDIM][F89_YDIM64]; - } * dset64; + } *dset64; struct { long double arr[F89_XDIM][F89_YDIM128]; - } * dset128; + } *dset128; float *aset32 = NULL; double *aset64 = NULL; @@ -10649,7 +10649,7 @@ gent_intsfourdims(void) hsize_t dims[F81_RANK]; struct { uint32_t arr[F81_ZDIM][F81_YDIM][F81_XDIM][F81_WDIM]; - } * dset1; + } *dset1; unsigned int i, j, k, l; fid = H5Fcreate(FILE81, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); @@ -10890,7 +10890,7 @@ gent_compound_complex2(void) buf[i].b[j] = (int)(j - i * 10); for (j = 0; j < dset_array_c_dims[0]; j++) for (k = 0; k < dset_array_c_dims[1]; k++) - buf[i].c[j][k] = (float)(j + k + i * 10) + (float)(j)*0.1F; + buf[i].c[j][k] = (float)(j + k + i * 10) + (float)(j) * 0.1F; /* Set up first nested compound */ buf[i].d.nested_a = (double)i; @@ -12025,7 +12025,7 @@ gent_float16(void) struct { H5__Float16 arr[F93_XDIM][F93_YDIM]; - } * dset16; + } *dset16; H5__Float16 *aset16 = NULL; H5__Float16 val16bits; @@ -12095,7 +12095,7 @@ gent_float16_be(void) struct { H5__Float16 arr[F94_XDIM][F94_YDIM]; - } * dset16; + } *dset16; H5__Float16 *aset16 = NULL; H5__Float16 val16bits; diff --git a/tools/test/h5repack/expected/h5repack-help.txt b/tools/test/h5repack/expected/h5repack-help.txt index 50242ef4853..718215260eb 100644 --- a/tools/test/h5repack/expected/h5repack-help.txt +++ b/tools/test/h5repack/expected/h5repack-help.txt @@ -93,7 +93,8 @@ usage: h5repack [OPTIONS] file1 file2 3: This is H5F_LIBVER_V112 in H5F_libver_t struct 4: This is H5F_LIBVER_V114 in H5F_libver_t struct 5: This is H5F_LIBVER_V116 in H5F_libver_t struct - (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V116 for this release + 6: This is H5F_LIBVER_V118 in H5F_libver_t struct + (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V118 for this release FS_STRATEGY is a string indicating the file space strategy used: FSM_AGGR: diff --git a/tools/test/h5repack/expected/h5repack_layout.h5-plugin_version_test.ddl b/tools/test/h5repack/expected/h5repack_layout.h5-plugin_version_test.ddl index d7aa1c3b0c5..fe10a5588df 100644 --- a/tools/test/h5repack/expected/h5repack_layout.h5-plugin_version_test.ddl +++ b/tools/test/h5repack/expected/h5repack_layout.h5-plugin_version_test.ddl @@ -11,7 +11,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 15 0 } + PARAMS { 9 1 17 0 } } } FILLVALUE { @@ -33,7 +33,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 15 0 } + PARAMS { 9 1 17 0 } } } FILLVALUE { @@ -55,7 +55,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 15 0 } + PARAMS { 9 1 17 0 } } } FILLVALUE { @@ -77,7 +77,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 15 0 } + PARAMS { 9 1 17 0 } } } FILLVALUE { @@ -99,7 +99,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 15 0 } + PARAMS { 9 1 17 0 } } } FILLVALUE { @@ -121,7 +121,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 15 0 } + PARAMS { 9 1 17 0 } } } FILLVALUE { @@ -143,7 +143,7 @@ GROUP "/" { USER_DEFINED_FILTER { FILTER_ID 260 COMMENT dynlib4 - PARAMS { 9 1 15 0 } + PARAMS { 9 1 17 0 } } } FILLVALUE { diff --git a/tools/test/misc/h5clear_gentest.c b/tools/test/misc/h5clear_gentest.c index 892e9740d27..5bcd91f9774 100644 --- a/tools/test/misc/h5clear_gentest.c +++ b/tools/test/misc/h5clear_gentest.c @@ -62,7 +62,7 @@ 
gen_cache_image_file(const char *fname) int i, j; /* Local index variables */ struct { int arr[50][100]; - } * buf; /* Buffer for data to write */ + } *buf; /* Buffer for data to write */ H5AC_cache_image_config_t cache_image_config = /* Cache image input configuration */ {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, true, false, H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE}; diff --git a/tools/test/misc/h5perf_gentest.c b/tools/test/misc/h5perf_gentest.c index 77569144e84..b3f5bb722b4 100644 --- a/tools/test/misc/h5perf_gentest.c +++ b/tools/test/misc/h5perf_gentest.c @@ -151,16 +151,16 @@ create_perf_test_file(const char *fname, int ngrps, int ndsets, int nattrs, hsiz size_t offset, len; herr_t status; char *names[NTYPES] = {"int", - "ulong", - "float", - "double", - "fixed string", - "enum", - "fixed float array", - "vlen int array", - "vlen strings"}; + "ulong", + "float", + "double", + "fixed string", + "enum", + "fixed float array", + "vlen int array", + "vlen strings"}; hid_t types[NTYPES] = {H5T_NATIVE_INT, H5T_NATIVE_UINT64, H5T_NATIVE_FLOAT, H5T_NATIVE_DOUBLE, tid_str, - tid_enum, tid_array_f, tid_vlen_i, tid_vlen_s}; + tid_enum, tid_array_f, tid_vlen_i, tid_vlen_s}; hsize_t coords[4][2] = {{0, 1}, {3, 5}, {1, 0}, {2, 4}}, start = 0, stride = 1, count = 1; if (nrows < NROWS)