Skip to content

Commit

Permalink
Merge branch 'HDFGroup:develop' into develop_sel_io_test
Browse files Browse the repository at this point in the history
  • Loading branch information
vchoi-hdfgroup authored Oct 2, 2023
2 parents c57b347 + 4261552 commit fa795c4
Show file tree
Hide file tree
Showing 6 changed files with 43 additions and 42 deletions.
9 changes: 3 additions & 6 deletions src/H5CS.c
Original file line number Diff line number Diff line change
Expand Up @@ -228,10 +228,9 @@ H5CS_pop(void)
H5CS_t *
H5CS_copy_stack(void)
{
H5CS_t *old_stack = H5CS_get_my_stack(); /* Existing function stack for library */
H5CS_t *new_stack; /* New function stack, for copy */
unsigned u; /* Local index variable */
H5CS_t *ret_value = NULL; /* Return value */
H5CS_t *old_stack = H5CS_get_my_stack(); /* Existing function stack for library */
H5CS_t *new_stack; /* New function stack, for copy */
H5CS_t *ret_value = NULL; /* Return value */

/* Don't push this function on the function stack... :-) */
FUNC_ENTER_NOAPI_NOFS
Expand Down Expand Up @@ -270,8 +269,6 @@ H5CS_copy_stack(void)
herr_t
H5CS_close_stack(H5CS_t *stack)
{
unsigned u; /* Local index variable */

/* Don't push this function on the function stack... :-) */
FUNC_ENTER_NOAPI_NOERR_NOFS

Expand Down
8 changes: 3 additions & 5 deletions src/H5Cimage.c
Original file line number Diff line number Diff line change
Expand Up @@ -1096,8 +1096,6 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl

/* Sanity checks */
assert(f);
assert(f->shared);
assert(f->shared->cache == f->shared->cache);

/* Check arguments */
if (cache_ptr == NULL)
Expand All @@ -1117,7 +1115,7 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl

cache_ptr->image_ctl = default_image_ctl;
assert(!(cache_ptr->image_ctl.generate_image));
} /* end if */
}
else {
#endif /* H5_HAVE_PARALLEL */
/* A cache image can only be generated if the file is opened read / write
Expand All @@ -1139,9 +1137,9 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl

cache_ptr->image_ctl = default_image_ctl;
assert(!(cache_ptr->image_ctl.generate_image));
} /* end else */
}
#ifdef H5_HAVE_PARALLEL
} /* end else */
}
#endif /* H5_HAVE_PARALLEL */

done:
Expand Down
4 changes: 2 additions & 2 deletions src/H5FDmpio.c
Original file line number Diff line number Diff line change
Expand Up @@ -2694,7 +2694,7 @@ H5FD__mpio_write_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t co
*/
file->eof = HADDR_UNDEF;

/* check to see if the local eof has changed been extended, and update if so */
/* check to see if the local eof has been extended, and update if so */
if (max_addr > file->local_eof)
file->local_eof = max_addr;

Expand Down Expand Up @@ -3744,7 +3744,7 @@ H5FD__mpio_truncate(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, bool H5_ATTR_UN

/* In principle, it is possible for the size returned by the
* call to MPI_File_get_size() to depend on whether writes from
* all proceeses have completed at the time process 0 makes the
* all processes have completed at the time process 0 makes the
* call.
*
* In practice, most (all?) truncate calls will come after a barrier
Expand Down
10 changes: 8 additions & 2 deletions test/h5test.c
Original file line number Diff line number Diff line change
Expand Up @@ -480,8 +480,14 @@ h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fu
if (H5FD_FAMILY == driver) {
if (subst_for_superblock)
suffix = "-000000.h5";
else
suffix = nest_printf ? "-%%06d.h5" : "-%06d.h5";
else {
if (nest_printf) {
suffix = "-%%06d.h5";
}
else {
suffix = "-%06d.h5";
}
}
}
else if (H5FD_MULTI == driver) {

Expand Down
30 changes: 15 additions & 15 deletions testpar/API/t_span_tree.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
one in collective mode,
2) We will read two datasets with the same hyperslab selection settings,
1. independent read to read independent output,
independent read to read collecive output,
independent read to read collective output,
Compare the result,
If the result is the same, then collective write succeeds.
2. collective read to read independent output,
Expand Down Expand Up @@ -498,7 +498,7 @@ coll_write_test(int chunk_factor)
For testing collective hyperslab selection write
In this test, we are using independent read to check
the correctedness of collective write compared with
the correctness of collective write compared with
independent write,
In order to thoroughly test this feature, we choose
Expand Down Expand Up @@ -593,7 +593,7 @@ coll_write_test(int chunk_factor)
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);

/*
* Select two hyperslabs in memory. Hyperslabs has the same
* Select two hyperslabs in memory. Hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace
* Only the starting point is different.
* The first selection
Expand Down Expand Up @@ -833,7 +833,7 @@ coll_read_test(void)
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);

/*
* Select two hyperslabs in memory. Hyperslabs has the same
* Select two hyperslabs in memory. Hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace.
* Only the starting point is different.
* The first selection
Expand Down Expand Up @@ -967,7 +967,7 @@ coll_read_test(void)
** sel_rank fastest changing indices, with origin (in the
** higher indices) as indicated by the start array.
**
** Note that this function, is hard coded to presume a
** Note that this function is hard-coded to presume a
** maximum dataspace rank of 5.
**
** While this maximum is declared as a constant, increasing
Expand Down Expand Up @@ -1045,7 +1045,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
* Note that the following computation depends on the C99
* requirement that integer division discard any fraction
* (truncation towards zero) to function correctly. As we
* now require C99, this shouldn't be a problem, but noting
* now require C99, this shouldn't be a problem, but note
* it may save us some pain if we are ever obliged to support
* pre-C99 compilers again.
*/
Expand Down Expand Up @@ -1074,7 +1074,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t

/* Now set up the stride and block arrays, and portions of the start
* and count arrays that will not be altered during the selection of
* the checker board.
* the checkerboard.
*/
i = 0;
while (i < ds_offset) {
Expand Down Expand Up @@ -1294,13 +1294,13 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
** expected data. Return true if it does, and false
** otherwise.
**
** The supplied buffer is presumed to this process's slice
** The supplied buffer is presumed to be this process's slice
** of the target data set. Each such slice will be an
** n-cube of rank (rank -1) and the supplied edge_size with
** origin (mpi_rank, 0, ... , 0) in the target data set.
**
** Further, the buffer is presumed to be the result of reading
** or writing a checker board selection of an m (1 <= m <
** or writing a checkerboard selection of an m (1 <= m <
** rank) dimensional slice through this process's slice
** of the target data set. Also, this slice must be parallel
** to the fastest changing indices.
Expand All @@ -1311,15 +1311,15 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
** with the natural numbers listed in order from the origin
** along the fastest changing axis.
**
** Thus for a 20x10x10 dataset, the value stored in location
** Thus, for a 20x10x10 dataset, the value stored in location
** (x, y, z) (assuming that z is the fastest changing index
** and x the slowest) is assumed to be:
**
** (10 * 10 * x) + (10 * y) + z
**
** Further, supposing that this is process 10, this process's
** slice of the dataset would be a 10 x 10 2-cube with origin
** (10, 0, 0) in the data set, and would be initialize (prior
** (10, 0, 0) in the data set, and would be initialized (prior
** to the checkerboard selection) as follows:
**
** 1000, 1001, 1002, ... 1008, 1009
Expand Down Expand Up @@ -2398,11 +2398,11 @@ lower_dim_size_comp_test(void)
*
* 1) Reads or writes exactly one chunk,
*
* 2) Has no in memory buffer for any other chunk.
* 2) Has no in-memory buffer for any other chunk.
*
* The test differers from Rob Latham's bug report in
* that is runs with an arbitrary number of proceeses,
* and uses a 1 dimensional dataset.
* The test differs from Rob Latham's bug report in
* that it runs with an arbitrary number of processes,
* and uses a 1-dimensional dataset.
*
* Return: void
*-------------------------------------------------------------------------
Expand Down
24 changes: 12 additions & 12 deletions testpar/t_span_tree.c
Original file line number Diff line number Diff line change
Expand Up @@ -399,7 +399,7 @@ coll_write_test(int chunk_factor)
For testing collective hyperslab selection write
In this test, we are using independent read to check
the correctedness of collective write compared with
the correctness of collective write compared with
independent write,
In order to thoroughly test this feature, we choose
Expand Down Expand Up @@ -494,7 +494,7 @@ coll_write_test(int chunk_factor)
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);

/*
* Select two hyperslabs in memory. Hyperslabs has the same
* Select two hyperslabs in memory. Hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace
* Only the starting point is different.
* The first selection
Expand Down Expand Up @@ -734,7 +734,7 @@ coll_read_test(void)
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);

/*
* Select two hyperslabs in memory. Hyperslabs has the same
* Select two hyperslabs in memory. Hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace.
* Only the starting point is different.
* The first selection
Expand Down Expand Up @@ -868,7 +868,7 @@ coll_read_test(void)
** sel_rank fastest changing indices, with origin (in the
** higher indices) as indicated by the start array.
**
** Note that this function, is hard coded to presume a
** Note that this function is hard-coded to presume a
** maximum dataspace rank of 5.
**
** While this maximum is declared as a constant, increasing
Expand Down Expand Up @@ -946,7 +946,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
* Note that the following computation depends on the C99
* requirement that integer division discard any fraction
* (truncation towards zero) to function correctly. As we
* now require C99, this shouldn't be a problem, but noting
* now require C99, this shouldn't be a problem, but note
* it may save us some pain if we are ever obliged to support
* pre-C99 compilers again.
*/
Expand Down Expand Up @@ -975,7 +975,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t

/* Now set up the stride and block arrays, and portions of the start
* and count arrays that will not be altered during the selection of
* the checker board.
* the checkerboard.
*/
i = 0;
while (i < ds_offset) {
Expand Down Expand Up @@ -1195,13 +1195,13 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
** expected data. Return true if it does, and false
** otherwise.
**
** The supplied buffer is presumed to this process's slice
** The supplied buffer is presumed to be this process's slice
** of the target data set. Each such slice will be an
** n-cube of rank (rank -1) and the supplied edge_size with
** origin (mpi_rank, 0, ... , 0) in the target data set.
**
** Further, the buffer is presumed to be the result of reading
** or writing a checker board selection of an m (1 <= m <
** or writing a checkerboard selection of an m (1 <= m <
** rank) dimensional slice through this process's slice
** of the target data set. Also, this slice must be parallel
** to the fastest changing indices.
Expand All @@ -1220,7 +1220,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
**
** Further, supposing that this is process 10, this process's
** slice of the dataset would be a 10 x 10 2-cube with origin
** (10, 0, 0) in the data set, and would be initialize (prior
** (10, 0, 0) in the data set, and would be initialized (prior
** to the checkerboard selection) as follows:
**
** 1000, 1001, 1002, ... 1008, 1009
Expand Down Expand Up @@ -2285,9 +2285,9 @@ lower_dim_size_comp_test(void)
*
* 2) Has no in-memory buffer for any other chunk.
*
* The test differers from Rob Latham's bug report in
* that is runs with an arbitrary number of proceeses,
* and uses a 1 dimensional dataset.
* The test differs from Rob Latham's bug report in
* that it runs with an arbitrary number of processes,
* and uses a 1-dimensional dataset.
*
* Return: void
*
Expand Down

0 comments on commit fa795c4

Please sign in to comment.