This repository was archived by the owner on Sep 30, 2022. It is now read-only.

Topic/v2.x/pmpi vs mpi #669

Merged: 12 commits, Dec 14, 2015
21 changes: 0 additions & 21 deletions config/ompi_configure_options.m4
@@ -166,27 +166,6 @@ case "x$enable_mpi_fortran" in
 ;;
 esac
 
-#
-# MPI profiling
-#
-
-AC_MSG_CHECKING([if want PMPI])
-AC_ARG_ENABLE(mpi-profile,
-AC_HELP_STRING([--enable-mpi-profile],
-[enable MPI profiling (default: enabled)]))
-if test "$enable_mpi_profile" != "no"; then
-AC_MSG_RESULT([yes])
-WANT_MPI_PROFILING=1
-MPIF_H_PMPI_W_FUNCS=", PMPI_WTICK, PMPI_WTIME"
-else
-AC_MSG_RESULT([no])
-WANT_MPI_PROFILING=0
-MPIF_H_PMPI_W_FUNCS=
-fi
-AC_SUBST(MPIF_H_PMPI_W_FUNCS)
-AM_CONDITIONAL(WANT_MPI_PROFILING, test "$WANT_MPI_PROFILING" = 1)
-
-
 #
 # C++
 #
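Note: with this commit the --enable-mpi-profile configure option is gone and the PMPI profiling layer is always built, so WANT_MPI_PROFILING and MPIF_H_PMPI_W_FUNCS disappear everywhere below. For context, here is a minimal sketch of the kind of tool this layer exists for (hypothetical code, not part of this PR): the tool supplies its own MPI_Send and forwards to the always-present PMPI_Send entry point.

/* Hypothetical profiling tool, not part of this PR: it provides a strong
 * MPI_Send that the linker prefers over the library's, then forwards to
 * PMPI_Send; this is why the PMPI_* symbols must always be built. */
#include <mpi.h>
#include <stdio.h>

int MPI_Send(const void *buf, int count, MPI_Datatype datatype,
             int dest, int tag, MPI_Comm comm)
{
    double t0 = PMPI_Wtime();                 /* time the underlying send */
    int rc = PMPI_Send(buf, count, datatype, dest, tag, comm);
    printf("MPI_Send to rank %d: %.6f s\n", dest, PMPI_Wtime() - t0);
    return rc;
}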
7 changes: 3 additions & 4 deletions config/ompi_setup_mpi_fortran.m4
@@ -367,7 +367,7 @@ AC_DEFUN([OMPI_SETUP_MPI_FORTRAN],[
 
 # We need to have ignore TKR functionality to build the mpi_f08
 # module
-AS_IF([test $OMPI_TRY_FORTRAN_BINDINGS -ge $OMPI_FORTRAN_USEMPIF08_BINDINGS &&
+AS_IF([test $OMPI_TRY_FORTRAN_BINDINGS -ge $OMPI_FORTRAN_USEMPIF08_BINDINGS && \
 test $OMPI_FORTRAN_HAVE_IGNORE_TKR -eq 1],
 [OMPI_BUILD_FORTRAN_BINDINGS=$OMPI_FORTRAN_USEMPIF08_BINDINGS
 OMPI_FORTRAN_F08_PREDECL=$OMPI_FORTRAN_IGNORE_TKR_PREDECL
@@ -635,11 +635,10 @@ end type test_mpi_handle],
 # these layers need to be built or NOT
 
 AM_CONDITIONAL(BUILD_MPI_FORTRAN_MPIFH_BINDINGS_LAYER,
-[( test $WANT_MPI_PROFILING -eq 0 || test $OMPI_PROFILING_COMPILE_SEPARATELY -eq 1 ) && \
+[test $OMPI_PROFILING_COMPILE_SEPARATELY -eq 1 && \
 test $OMPI_BUILD_FORTRAN_BINDINGS -gt $OMPI_FORTRAN_NO_BINDINGS])
 AM_CONDITIONAL(BUILD_PMPI_FORTRAN_MPIFH_BINDINGS_LAYER,
-[test $OMPI_BUILD_FORTRAN_BINDINGS -gt $OMPI_FORTRAN_NO_BINDINGS && \
-test $WANT_MPI_PROFILING -eq 1])
+[test $OMPI_BUILD_FORTRAN_BINDINGS -gt $OMPI_FORTRAN_NO_BINDINGS])
 AM_CONDITIONAL(OMPI_BUILD_FORTRAN_MPIFH_BINDINGS,
 [test $OMPI_BUILD_FORTRAN_BINDINGS -gt $OMPI_FORTRAN_NO_BINDINGS])
 
8 changes: 3 additions & 5 deletions config/ompi_setup_mpi_profiling.m4
@@ -15,6 +15,8 @@
 # Copyright (c) 2006-2007 Los Alamos National Security, LLC. All rights
 # reserved.
 # Copyright (c) 2009 Oak Ridge National Labs. All rights reserved.
+# Copyright (c) 2015 Research Organization for Information Science
+# and Technology (RIST). All rights reserved.
 # $COPYRIGHT$
 #
 # Additional copyrights may follow
@@ -39,14 +41,10 @@ AC_DEFUN([OMPI_SETUP_MPI_PROFILING],[
 #
 
 AM_CONDITIONAL(BUILD_MPI_BINDINGS_LAYER,
-test "$WANT_MPI_PROFILING" = 0 -o "$OMPI_PROFILING_COMPILE_SEPARATELY" = 1)
+test "$OMPI_PROFILING_COMPILE_SEPARATELY" = 1)
 
-AM_CONDITIONAL(BUILD_PMPI_BINDINGS_LAYER,
-test "$WANT_MPI_PROFILING" = 1)
 AM_CONDITIONAL(COMPILE_PROFILING_SEPARATELY,
 test "$OMPI_PROFILING_COMPILE_SEPARATELY" = 1)
-AC_DEFINE_UNQUOTED(OMPI_ENABLE_MPI_PROFILING, $WANT_MPI_PROFILING,
-[Whether we want MPI profiling or not])
 AC_DEFINE_UNQUOTED(OPAL_HAVE_WEAK_SYMBOLS, $OPAL_C_HAVE_WEAK_SYMBOLS,
 [Whether we have weak symbols or not])
 ])
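With profiling unconditional, everything now keys off OPAL_C_HAVE_WEAK_SYMBOLS alone: if the compiler supports weak symbols, each binding is compiled once and the MPI_* name is emitted as a weak alias of the PMPI_* definition; otherwise the bindings are compiled a second time (COMPILE_PROFILING_SEPARATELY) to produce the PMPI_* copies. A sketch of the weak-alias scheme, assuming #pragma weak support (the real bindings generate this through macros):

/* Sketch of the weak-symbol scheme, assuming #pragma weak support; the
 * real bindings generate this through macros. MPI_Barrier is a weak
 * alias of PMPI_Barrier, so a tool's strong MPI_Barrier overrides it
 * while PMPI_Barrier stays reachable. */
#include <mpi.h>

#pragma weak MPI_Barrier = PMPI_Barrier

int PMPI_Barrier(MPI_Comm comm)
{
    /* ... the real barrier implementation lives here ... */
    return MPI_SUCCESS;
}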
18 changes: 4 additions & 14 deletions configure.ac
@@ -495,14 +495,10 @@ fi
 if test "$WANT_WEAK_SYMBOLS" = "0"; then
 OPAL_C_HAVE_WEAK_SYMBOLS=0
 fi
-if test "$WANT_MPI_PROFILING" = "1"; then
-if test "$OPAL_C_HAVE_WEAK_SYMBOLS" = "1"; then
-OMPI_PROFILING_COMPILE_SEPARATELY=0
-else
-OMPI_PROFILING_COMPILE_SEPARATELY=1
-fi
-else
+if test "$OPAL_C_HAVE_WEAK_SYMBOLS" = "1"; then
 OMPI_PROFILING_COMPILE_SEPARATELY=0
+else
+OMPI_PROFILING_COMPILE_SEPARATELY=1
 fi
 
 # Check if we support the offsetof compiler directive
@@ -547,13 +543,7 @@ m4_ifdef([project_ompi], [OMPI_SETUP_JAVA_BINDINGS])
 ##################################
 
 # Setup profiling bindings (if we're building the relevant projects).
-# Note that opal_wrapper.c has a hard-coded use of the
-# OMPI_ENABLE_MPI_PROFILING macro, so we need to define it (to 0) even
-# if we're not building the OMPI project.
-
-m4_ifdef([project_ompi], [OMPI_SETUP_MPI_PROFILING],
-[AC_DEFINE([OMPI_ENABLE_MPI_PROFILING], [0],
-[We are not building OMPI, so no profiling])])
+m4_ifdef([project_ompi], [OMPI_SETUP_MPI_PROFILING])
 m4_ifdef([project_oshmem], [OSHMEM_SETUP_PROFILING])
 
 
6 changes: 2 additions & 4 deletions ompi/Makefile.am
@@ -15,6 +15,8 @@
 # Copyright (c) 2013-2015 Los Alamos National Security, LLC. All rights
 # reserved.
 # Copyright (c) 2015 Intel, Inc. All rights reserved.
+# Copyright (c) 2015 Research Organization for Information Science
+# and Technology (RIST). All rights reserved.
 # $COPYRIGHT$
 #
 # Additional copyrights may follow
@@ -29,11 +31,7 @@ c_mpi_lib = mpi/c/libmpi_c_mpi.la mpi/tool/libmpi_mpit.la
 else
 c_mpi_lib =
 endif
-if BUILD_PMPI_BINDINGS_LAYER
 c_pmpi_lib = mpi/c/profile/libmpi_c_pmpi.la mpi/tool/profile/libmpi_pmpit.la
-else
-c_pmpi_lib =
-endif
 
 # See if we have Fortran mpif.h MPI bindings
 
6 changes: 3 additions & 3 deletions ompi/mca/coll/libnbc/nbc_iallgather.c
@@ -58,7 +58,7 @@ int ompi_coll_libnbc_iallgather(const void* sendbuf, int sendcount, MPI_Datatype
 rank = ompi_comm_rank (comm);
 p = ompi_comm_size (comm);
 
-res = MPI_Type_extent(recvtype, &rcvext);
+res = ompi_datatype_type_extent(recvtype, &rcvext);
 if (MPI_SUCCESS != res) {
 return res;
 }
@@ -175,9 +175,9 @@ int ompi_coll_libnbc_iallgather_inter(const void* sendbuf, int sendcount, MPI_Da
 NBC_Handle *handle;
 ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
 
-res = MPI_Type_extent(recvtype, &rcvext);
+res = ompi_datatype_type_extent(recvtype, &rcvext);
 if (MPI_SUCCESS != res) {
-NBC_Error ("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error ("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
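From here on, the libnbc files get the same two mechanical substitutions: the deprecated MPI-level MPI_Type_extent() becomes the internal ompi_datatype_type_extent(), and the NBC_Error() strings are updated to match, so internal collective code no longer passes through the MPI-level entry points at all. A sketch of the resulting pattern, with the internal signature assumed from ompi/datatype/ompi_datatype.h and a hypothetical helper name:

/* Hypothetical helper showing the post-PR pattern; the internal
 * signature is assumed from ompi/datatype/ompi_datatype.h. */
#include "ompi/datatype/ompi_datatype.h"
#include "nbc_internal.h"

static int nbc_get_recv_extent(ompi_datatype_t *recvtype, MPI_Aint *rcvext)
{
    /* direct internal call: bypasses the MPI and PMPI entry points */
    int res = ompi_datatype_type_extent(recvtype, rcvext);
    if (OPAL_UNLIKELY(MPI_SUCCESS != res)) {
        NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
    }
    return res;
}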
8 changes: 4 additions & 4 deletions ompi/mca/coll/libnbc/nbc_iallgatherv.c
@@ -45,9 +45,9 @@ int ompi_coll_libnbc_iallgatherv(const void* sendbuf, int sendcount, MPI_Datatyp
 rank = ompi_comm_rank (comm);
 p = ompi_comm_size (comm);
 
-res = MPI_Type_extent (recvtype, &rcvext);
+res = ompi_datatype_type_extent (recvtype, &rcvext);
 if (OPAL_UNLIKELY(MPI_SUCCESS != res)) {
-NBC_Error ("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error ("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
@@ -124,9 +124,9 @@ int ompi_coll_libnbc_iallgatherv_inter(const void* sendbuf, int sendcount, MPI_D
 
 rsize = ompi_comm_remote_size (comm);
 
-res = MPI_Type_extent(recvtype, &rcvext);
+res = ompi_datatype_type_extent(recvtype, &rcvext);
 if (OPAL_UNLIKELY(MPI_SUCCESS != res)) {
-NBC_Error ("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error ("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
15 changes: 8 additions & 7 deletions ompi/mca/coll/libnbc/nbc_iallreduce.c
@@ -70,13 +70,13 @@ int ompi_coll_libnbc_iallreduce(const void* sendbuf, void* recvbuf, int count, M
 
 res = ompi_datatype_get_extent(datatype, &lb, &ext);
 if (OMPI_SUCCESS != res) {
-NBC_Error ("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error ("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
 res = ompi_datatype_type_size (datatype, &size);
 if (OMPI_SUCCESS != res) {
-NBC_Error ("MPI Error in MPI_Type_size() (%i)", res);
+NBC_Error ("MPI Error in ompi_datatype_type_size() (%i)", res);
 return res;
 }
 
@@ -193,7 +193,8 @@ int ompi_coll_libnbc_iallreduce_inter(const void* sendbuf, void* recvbuf, int co
 struct ompi_communicator_t *comm, ompi_request_t ** request,
 struct mca_coll_base_module_2_1_0_t *module)
 {
-int rank, res, size, rsize;
+int rank, res, rsize;
+size_t size;
 MPI_Aint ext;
 NBC_Schedule *schedule;
 NBC_Handle *handle;
@@ -202,15 +203,15 @@ int ompi_coll_libnbc_iallreduce_inter(const void* sendbuf, void* recvbuf, int co
 rank = ompi_comm_rank (comm);
 rsize = ompi_comm_remote_size (comm);
 
-res = MPI_Type_extent(datatype, &ext);
+res = ompi_datatype_type_extent(datatype, &ext);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
-res = MPI_Type_size(datatype, &size);
+res = ompi_datatype_type_size(datatype, &size);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_size() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_size() (%i)", res);
 return res;
 }
 
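One detail worth flagging in this file: MPI_Type_size() reports through an int, while the internal ompi_datatype_type_size() reports through a size_t (signature assumed from ompi/datatype/ompi_datatype.h), which is why the declaration splits into "int rank, res, rsize;" plus a separate "size_t size;". A minimal sketch of that assumption:

/* Why `size` changes type: the internal API reports the datatype size
 * through a size_t rather than MPI_Type_size()'s int (signature assumed
 * from ompi/datatype/ompi_datatype.h; helper name hypothetical). */
#include "ompi/datatype/ompi_datatype.h"

static size_t nbc_type_size_bytes(ompi_datatype_t *datatype)
{
    size_t size = 0;   /* was `int` while MPI_Type_size() was used */
    (void) ompi_datatype_type_size(datatype, &size);
    return size;
}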
41 changes: 21 additions & 20 deletions ompi/mca/coll/libnbc/nbc_ialltoall.c
@@ -51,7 +51,8 @@ int ompi_coll_libnbc_ialltoall(const void* sendbuf, int sendcount, MPI_Datatype
 MPI_Datatype recvtype, struct ompi_communicator_t *comm, ompi_request_t ** request,
 struct mca_coll_base_module_2_1_0_t *module)
 {
-int rank, p, res, a2asize, sndsize, datasize;
+int rank, p, res, datasize;
+size_t a2asize, sndsize;
 NBC_Schedule *schedule;
 MPI_Aint rcvext, sndext;
 #ifdef NBC_CACHE_SCHEDULE
@@ -67,21 +68,21 @@
 rank = ompi_comm_rank (comm);
 p = ompi_comm_size (comm);
 
-res = MPI_Type_extent(sendtype, &sndext);
+res = ompi_datatype_type_extent(sendtype, &sndext);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
-res = MPI_Type_extent(recvtype, &rcvext);
+res = ompi_datatype_type_extent(recvtype, &rcvext);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
-res = MPI_Type_size(sendtype, &sndsize);
+res = ompi_datatype_type_size(sendtype, &sndsize);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_size() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_size() (%i)", res);
 return res;
 }
 
@@ -93,7 +94,7 @@ int ompi_coll_libnbc_ialltoall(const void* sendbuf, int sendcount, MPI_Datatype
 * total communicated size is smaller than 1<<17 *and* if we don't
 * have eager messages (msgsize < 1<<13) */
 alg = NBC_A2A_LINEAR;
-} else if(a2asize < (1<<12)*p) {
+} else if(a2asize < (1<<12)*(unsigned int)p) {
 /*alg = NBC_A2A_DISS;*/
 alg = NBC_A2A_LINEAR;
 } else
@@ -120,9 +121,9 @@ int ompi_coll_libnbc_ialltoall(const void* sendbuf, int sendcount, MPI_Datatype
 if(NBC_Type_intrinsic(sendtype)) {
 datasize = sndext * sendcount;
 } else {
-res = MPI_Pack_size (sendcount, sendtype, comm, &datasize);
+res = PMPI_Pack_size (sendcount, sendtype, comm, &datasize);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Pack_size() (%i)", res);
+NBC_Error("MPI Error in PMPI_Pack_size() (%i)", res);
 NBC_Return_handle (handle);
 return res;
 }
@@ -156,20 +157,20 @@ int ompi_coll_libnbc_ialltoall(const void* sendbuf, int sendcount, MPI_Datatype
 int pos=0;
 
 /* non-contiguous - pack */
-res = MPI_Pack ((char *) sendbuf + rank * sendcount * sndext, (p - rank) * sendcount, sendtype, handle->tmpbuf,
+res = PMPI_Pack ((char *) sendbuf + rank * sendcount * sndext, (p - rank) * sendcount, sendtype, handle->tmpbuf,
 (p - rank) * datasize, &pos, comm);
 if (OPAL_UNLIKELY(MPI_SUCCESS != res)) {
-NBC_Error("MPI Error in MPI_Pack() (%i)", res);
+NBC_Error("MPI Error in PMPI_Pack() (%i)", res);
 NBC_Return_handle (handle);
 return res;
 }
 
 if (rank != 0) {
 pos = 0;
-res = MPI_Pack(sendbuf, rank * sendcount, sendtype, (char *) handle->tmpbuf + datasize * (p - rank),
+res = PMPI_Pack(sendbuf, rank * sendcount, sendtype, (char *) handle->tmpbuf + datasize * (p - rank),
 rank * datasize, &pos, comm);
 if (OPAL_UNLIKELY(MPI_SUCCESS != res)) {
-NBC_Error("MPI Error in MPI_Pack() (%i)", res);
+NBC_Error("MPI Error in PMPI_Pack() (%i)", res);
 NBC_Return_handle (handle);
 return res;
 }
@@ -277,15 +278,15 @@ int ompi_coll_libnbc_ialltoall_inter (const void* sendbuf, int sendcount, MPI_Da
 
 rsize = ompi_comm_remote_size (comm);
 
-res = MPI_Type_extent (sendtype, &sndext);
+res = ompi_datatype_type_extent (sendtype, &sndext);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
-res = MPI_Type_extent (recvtype, &rcvext);
+res = ompi_datatype_type_extent (recvtype, &rcvext);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
@@ -415,9 +416,9 @@ static inline int a2a_sched_diss(int rank, int p, MPI_Aint sndext, MPI_Aint rcve
 if(NBC_Type_intrinsic(sendtype)) {
 datasize = sndext*sendcount;
 } else {
-res = MPI_Pack_size(sendcount, sendtype, comm, &datasize);
+res = PMPI_Pack_size(sendcount, sendtype, comm, &datasize);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Pack_size() (%i)", res);
+NBC_Error("MPI Error in PMPI_Pack_size() (%i)", res);
 return res;
 }
 }
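MPI_Pack() and MPI_Pack_size() have no internal equivalent used here, so this file redirects them to their PMPI_ entry points instead. The effect matches the datatype changes above: internal libnbc traffic can no longer trip a tool that has interposed on the MPI_ names. A small illustration with a hypothetical helper:

/* Hypothetical helper: if a tool wraps MPI_Pack_size(), an internal call
 * through the MPI_ name would be miscounted as application traffic, so
 * internal code goes through the never-intercepted PMPI_ entry point. */
#include <mpi.h>

static int internal_pack_size(int count, MPI_Datatype type, MPI_Comm comm)
{
    int bytes = 0;
    (void) PMPI_Pack_size(count, type, comm, &bytes);
    return bytes;
}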
16 changes: 8 additions & 8 deletions ompi/mca/coll/libnbc/nbc_ialltoallv.c
@@ -37,15 +37,15 @@ int ompi_coll_libnbc_ialltoallv(const void* sendbuf, const int *sendcounts, cons
 rank = ompi_comm_rank (comm);
 p = ompi_comm_size (comm);
 
-res = MPI_Type_extent (sendtype, &sndext);
+res = ompi_datatype_type_extent (sendtype, &sndext);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
-res = MPI_Type_extent (recvtype, &rcvext);
+res = ompi_datatype_type_extent (recvtype, &rcvext);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
@@ -128,15 +128,15 @@ int ompi_coll_libnbc_ialltoallv_inter (const void* sendbuf, const int *sendcount
 ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
 
 
-res = MPI_Type_extent(sendtype, &sndext);
+res = ompi_datatype_type_extent(sendtype, &sndext);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 
-res = MPI_Type_extent(recvtype, &rcvext);
+res = ompi_datatype_type_extent(recvtype, &rcvext);
 if (MPI_SUCCESS != res) {
-NBC_Error("MPI Error in MPI_Type_extent() (%i)", res);
+NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 return res;
 }
 