Committing clang-format changes
github-actions[bot] committed Jul 13, 2023
1 parent bedfe56 commit 79133da
Showing 1 changed file with 29 additions and 26 deletions.
src/H5Dmpio.c (55 changes: 29 additions & 26 deletions)
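Because this is an automated clang-format pass, every hunk below is a pure re-wrapping or re-alignment of existing code with no behavioral change: long calls and declarations are split at the project's column limit, and continuation arguments are realigned. As a rough illustration of how such a pass is typically run, here is a minimal sketch; the command and the .clang-format option values shown are assumptions for illustration, not taken from this commit or from HDF5's actual configuration.

    # Hypothetical formatting step: rewrite the file in place using the
    # repository's .clang-format file (-i and --style=file are standard flags).
    clang-format -i --style=file src/H5Dmpio.c
    #
    # Illustrative .clang-format options that produce wrapping like the hunks
    # below (the real HDF5 settings may differ):
    #   ColumnLimit: 110
    #   AlignAfterOpenBracket: Align
    #   AlignConsecutiveDeclarations: true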
@@ -347,7 +347,8 @@ static herr_t H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, hb
 static herr_t H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, hbool_t *contig_type_derived,
 MPI_Datatype *resized_type,
 hbool_t *resized_type_derived);
-static herr_t H5D__mpio_collective_filtered_vec_io(H5F_shared_t *f_sh, H5D_filtered_collective_io_info_t *chunk_list,
+static herr_t H5D__mpio_collective_filtered_vec_io(H5F_shared_t *f_sh,
+H5D_filtered_collective_io_info_t *chunk_list,
 size_t num_entries, H5D_io_op_type_t op_type);
 static int H5D__cmp_piece_addr(const void *chunk_addr_info1, const void *chunk_addr_info2);
 static int H5D__cmp_filtered_collective_io_info_entry(const void *filtered_collective_io_info_entry1,
@@ -1764,13 +1765,13 @@ static herr_t
 H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, int mpi_rank,
 int mpi_size)
 {
-H5D_filtered_collective_io_info_t *chunk_list = NULL; /* The list of chunks being read/written */
-H5D_filtered_collective_io_info_t *chunk_hash_table = NULL;
-unsigned char **chunk_msg_bufs = NULL;
+H5D_filtered_collective_io_info_t *chunk_list = NULL; /* The list of chunks being read/written */
+H5D_filtered_collective_io_info_t *chunk_hash_table = NULL;
+unsigned char **chunk_msg_bufs = NULL;
 size_t *rank_chunks_assigned_map = NULL;
 size_t chunk_list_num_entries;
 int chunk_msg_bufs_len = 0;
-herr_t ret_value = SUCCEED;
+herr_t ret_value = SUCCEED;
 
 FUNC_ENTER_PACKAGE_TAG(dset_info->dset->oloc.addr)
 
@@ -2284,10 +2285,9 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info
 * the chunks. As chunk reads are done collectively here, all ranks
 * must participate.
 */
-if (H5D__mpio_collective_filtered_chunk_update(have_chunk_to_process ? &chunk_list[i] : NULL,
-have_chunk_to_process ? 1 : 0, chunk_hash_table,
-chunk_msg_bufs, chunk_msg_bufs_len, io_info,
-dset_info, mpi_rank) < 0)
+if (H5D__mpio_collective_filtered_chunk_update(
+have_chunk_to_process ? &chunk_list[i] : NULL, have_chunk_to_process ? 1 : 0,
+chunk_hash_table, chunk_msg_bufs, chunk_msg_bufs_len, io_info, dset_info, mpi_rank) < 0)
 HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't update modified chunks")
 
 /* All ranks now collectively re-allocate file space for all chunks */
@@ -2298,9 +2298,11 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info
 "couldn't collectively re-allocate file space for chunks")
 
 /* Perform vector I/O on chunks */
-if (H5D__mpio_collective_filtered_vec_io(io_info->f_sh, have_chunk_to_process ? &chunk_list[i] : NULL,
+if (H5D__mpio_collective_filtered_vec_io(io_info->f_sh,
+have_chunk_to_process ? &chunk_list[i] : NULL,
 have_chunk_to_process ? 1 : 0, io_info->op_type) < 0)
-HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't perform vector I/O on filtered chunks")
+HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
+"couldn't perform vector I/O on filtered chunks")
 
 /* Free up resources in anticipation of following collective operation */
 if (have_chunk_to_process && chunk_list[i].buf) {
@@ -4229,8 +4231,8 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
 H5D_fill_buf_info_t fb_info;
 H5D_piece_info_t *chunk_info = NULL;
 H5S_sel_iter_t *sel_iter = NULL; /* Dataspace selection iterator for H5D__scatter_mem */
-H5Z_EDC_t err_detect; /* Error detection info */
-H5Z_cb_t filter_cb; /* I/O filter callback function */
+H5Z_EDC_t err_detect; /* Error detection info */
+H5Z_cb_t filter_cb; /* I/O filter callback function */
 hsize_t file_chunk_size = 0;
 hsize_t iter_nelmts; /* Number of points to iterate over for the chunk IO operation */
 hbool_t should_fill = FALSE;
@@ -5377,7 +5379,8 @@ H5D__mpio_collective_filtered_vec_io(H5F_shared_t *f_sh, H5D_filtered_collective
 
 if (num_entries > 0) {
 if (num_entries > UINT32_MAX)
-HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "number of chunk entries in I/O operation exceeds UINT32_MAX")
+HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL,
+"number of chunk entries in I/O operation exceeds UINT32_MAX")
 
 if (op_type == H5D_IO_OP_WRITE)
 iovec_count = (uint32_t)num_entries;
@@ -5397,11 +5400,13 @@ H5D__mpio_collective_filtered_vec_io(H5F_shared_t *f_sh, H5D_filtered_collective
 
 if (op_type == H5D_IO_OP_WRITE) {
 if (NULL == (io_wbufs = H5MM_malloc(iovec_count * sizeof(*io_wbufs))))
-HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate space for I/O buffers vector")
+HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+"couldn't allocate space for I/O buffers vector")
 }
 else {
 if (NULL == (io_rbufs = H5MM_malloc(iovec_count * sizeof(*io_rbufs))))
-HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate space for I/O buffers vector")
+HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+"couldn't allocate space for I/O buffers vector")
 }
 
 /*
@@ -5410,7 +5415,8 @@ H5D__mpio_collective_filtered_vec_io(H5F_shared_t *f_sh, H5D_filtered_collective
 * are the same across the I/O vectors
 */
 if (NULL == (io_types = H5MM_malloc(2 * sizeof(*io_types))))
-HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate space for I/O memory types vector")
+HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+"couldn't allocate space for I/O memory types vector")
 io_types[0] = H5FD_MEM_DRAW;
 io_types[1] = H5FD_MEM_NOLIST;
 
@@ -5421,7 +5427,8 @@ H5D__mpio_collective_filtered_vec_io(H5F_shared_t *f_sh, H5D_filtered_collective
 continue;
 
 /* Set convenience pointer for current chunk block */
-chunk_block = (op_type == H5D_IO_OP_READ) ? &chunk_list[i].chunk_current : &chunk_list[i].chunk_new;
+chunk_block =
+(op_type == H5D_IO_OP_READ) ? &chunk_list[i].chunk_current : &chunk_list[i].chunk_new;
 
 assert(H5_addr_defined(chunk_block->offset));
 io_addrs[vec_idx] = chunk_block->offset;
@@ -5448,16 +5455,12 @@ H5D__mpio_collective_filtered_vec_io(H5F_shared_t *f_sh, H5D_filtered_collective
 }
 
 if (op_type == H5D_IO_OP_WRITE) {
-if (H5F_shared_vector_write(f_sh, iovec_count, io_types, io_addrs,
-io_sizes, io_wbufs) < 0)
-HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
-"vector write call failed")
+if (H5F_shared_vector_write(f_sh, iovec_count, io_types, io_addrs, io_sizes, io_wbufs) < 0)
+HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "vector write call failed")
 }
 else {
-if (H5F_shared_vector_read(f_sh, iovec_count, io_types, io_addrs,
-io_sizes, io_rbufs) < 0)
-HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL,
-"vector read call failed")
+if (H5F_shared_vector_read(f_sh, iovec_count, io_types, io_addrs, io_sizes, io_rbufs) < 0)
+HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "vector read call failed")
 }
 
 done:
