2 changes: 1 addition & 1 deletion Common/include/linear_algebra/CPastixWrapper.hpp
@@ -93,7 +93,7 @@ class CPastixWrapper
* \brief Run the external solver for the task it is currently setup to execute.
*/
void Run() {
-dpastix(&state, MPI_COMM_WORLD, nCols, colptr.data(), rowidx.data(), values.data(),
+dpastix(&state, SU2_MPI::GetComm(), nCols, colptr.data(), rowidx.data(), values.data(),
loc2glb.data(), perm.data(), NULL, workvec.data(), 1, iparm, dparm);
}

2 changes: 1 addition & 1 deletion Common/include/linear_algebra/CSysVector.hpp
@@ -315,7 +315,7 @@ class CSysVector : public VecExpr::CVecExpr<CSysVector<ScalarType>, ScalarType>
SU2_OMP_MASTER {
sum = dotRes;
const auto mpi_type = (sizeof(ScalarType) < sizeof(double)) ? MPI_FLOAT : MPI_DOUBLE;
-SelectMPIWrapper<ScalarType>::W::Allreduce(&sum, &dotRes, 1, mpi_type, MPI_SUM, MPI_COMM_WORLD);
+SelectMPIWrapper<ScalarType>::W::Allreduce(&sum, &dotRes, 1, mpi_type, MPI_SUM, SU2_MPI::GetComm());
}
}
#endif
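As a side note on the hunk above, the reduction it touches boils down to a few lines of plain MPI. The sketch below is illustrative only, not SU2 code: the helper name AllreduceDot and its signature are assumptions, and it only supports float and double scalars, mirroring the datatype selection in the diff.

```cpp
#include <mpi.h>

// Illustrative helper (not part of SU2): reduce a rank-local partial dot product
// to a global sum over the given communicator, picking the MPI datatype from the
// scalar's size exactly as the hunk above does. Only float/double are supported.
template <class ScalarType>
ScalarType AllreduceDot(ScalarType localSum, MPI_Comm comm) {
  const MPI_Datatype mpiType = (sizeof(ScalarType) < sizeof(double)) ? MPI_FLOAT : MPI_DOUBLE;
  ScalarType globalSum = 0;
  MPI_Allreduce(&localSum, &globalSum, 1, mpiType, MPI_SUM, comm);  // sum over all ranks
  return globalSum;
}
```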
8 changes: 8 additions & 0 deletions Common/include/parallelization/mpi_structure.cpp
@@ -27,9 +27,17 @@

#include "mpi_structure.hpp"


/* Initialise the MPI Communicator Rank and Size */
int CBaseMPIWrapper::Rank = 0;
int CBaseMPIWrapper::Size = 1;

+/* Set the default MPI Communicator */
+#ifdef HAVE_MPI
+CBaseMPIWrapper::Comm CBaseMPIWrapper::currentComm = MPI_COMM_WORLD;
+#else
+CBaseMPIWrapper::Comm CBaseMPIWrapper::currentComm = 0; // dummy value
+#endif
Comment on lines +35 to +40
Member

I like this solution.
The rank and size are updated by the SetComm function so everything should work ok when creating a driver from scratch.

Contributor Author

Glad to hear that. Is anything still missing? Not sure how to go about the failing check for code complexity...

Member

Not for me. I'll ask around at the developers meeting tomorrow to see if anyone else has comments; CodeFactor glitches out sometimes, no need to do anything.

By the way, would you like to be added to su2code? You could then have branches here instead of in your fork, which makes it simpler to manage future PRs you might open.

Contributor Author

Yes, I suppose it would be more convenient that way. Thanks!


#ifdef HAVE_MPI
int CBaseMPIWrapper::MinRankError;
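For readers following the discussion above, here is a hedged sketch of the SetComm/GetComm mechanism being described, not the actual mpi_structure implementation. The class name carries a Sketch suffix to make clear the bodies are assumptions; only the idea, that switching the communicator also refreshes the cached rank and size, comes from the conversation.

```cpp
#include <mpi.h>

// Sketch only: keep Rank/Size consistent with the communicator returned by GetComm(),
// as described in the review comments above. The real SU2 wrapper lives in
// mpi_structure.hpp/.cpp and may differ in detail.
class CBaseMPIWrapperSketch {
 public:
  using Comm = MPI_Comm;

  static void SetComm(Comm newComm) {
    currentComm = newComm;              // switch the active communicator
    MPI_Comm_rank(currentComm, &Rank);  // refresh the cached rank
    MPI_Comm_size(currentComm, &Size);  // refresh the cached size
  }
  static Comm GetComm() { return currentComm; }
  static int GetRank() { return Rank; }
  static int GetSize() { return Size; }

 private:
  static int Rank, Size;
  static Comm currentComm;
};

int CBaseMPIWrapperSketch::Rank = 0;
int CBaseMPIWrapperSketch::Size = 1;
CBaseMPIWrapperSketch::Comm CBaseMPIWrapperSketch::currentComm = MPI_COMM_WORLD;
```

Under that assumption, a driver built on a sub-communicator would call SetComm once up front, and every later GetComm()-based call would see consistent rank and size values.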
1 change: 0 additions & 1 deletion Common/include/parallelization/mpi_structure.hpp
@@ -467,7 +467,6 @@ class CMediMPIWrapper : public CBaseMPIWrapper {

#else // HAVE_MPI

-#define MPI_COMM_WORLD 0
#define MPI_UNSIGNED_LONG 1
#define MPI_LONG 2
#define MPI_UNSIGNED_SHORT 3
4 changes: 2 additions & 2 deletions Common/include/toolboxes/CQuasiNewtonInvLeastSquares.hpp
@@ -95,11 +95,11 @@ class CQuasiNewtonInvLeastSquares {

su2vector<Scalar> tmp(mat.size());
MPI_Wrapper::Allreduce(mat.data(), tmp.data(), iSample*(iSample+1)/2,
-type, MPI_SUM, MPI_COMM_WORLD);
+type, MPI_SUM, SU2_MPI::GetComm());
mat = std::move(tmp);

MPI_Wrapper::Allreduce(rhs.data(), sol.data(), iSample,
-type, MPI_SUM, MPI_COMM_WORLD);
+type, MPI_SUM, SU2_MPI::GetComm());
std::swap(rhs, sol);
}
}
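For context, the reduction step in this hunk amounts to summing a packed triangular matrix and a right-hand side across ranks. The sketch below shows that pattern in plain MPI; ReduceNormalEquations is an illustrative name and not part of SU2.

```cpp
#include <mpi.h>
#include <vector>

// Illustrative helper (not SU2 API): sum a packed lower-triangular normal-equations
// matrix and its right-hand side over all ranks, mirroring the two Allreduce calls above.
void ReduceNormalEquations(std::vector<double>& packedMat, std::vector<double>& rhs, MPI_Comm comm) {
  std::vector<double> tmp(packedMat.size());
  MPI_Allreduce(packedMat.data(), tmp.data(), static_cast<int>(packedMat.size()),
                MPI_DOUBLE, MPI_SUM, comm);
  packedMat.swap(tmp);  // keep the globally reduced matrix

  std::vector<double> sol(rhs.size());
  MPI_Allreduce(rhs.data(), sol.data(), static_cast<int>(rhs.size()),
                MPI_DOUBLE, MPI_SUM, comm);
  rhs.swap(sol);        // same swap trick for the right-hand side
}
```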
45 changes: 21 additions & 24 deletions Common/src/CConfig.cpp
@@ -4953,10 +4953,7 @@ void CConfig::SetMarkers(unsigned short val_software) {
iMarker_Turbomachinery, iMarker_MixingPlaneInterface;

int size = SINGLE_NODE;

-#ifdef HAVE_MPI
-SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
-#endif
+SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);

/*--- Compute the total number of markers in the config file ---*/

@@ -9214,8 +9211,8 @@ void CConfig::SetProfilingCSV(void) {
int rank = MASTER_NODE;
int size = SINGLE_NODE;
#ifdef HAVE_MPI
-SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
-SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
+SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
+SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);
#endif

/*--- Each rank has the same stack trace, so they have the same
@@ -9299,11 +9296,11 @@ void CConfig::SetProfilingCSV(void) {
}

#ifdef HAVE_MPI
-MPI_Reduce(n_calls, n_calls_red, map_size, MPI_INT, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD);
-MPI_Reduce(l_tot, l_tot_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD);
-MPI_Reduce(l_avg, l_avg_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD);
-MPI_Reduce(l_min, l_min_red, map_size, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD);
-MPI_Reduce(l_max, l_max_red, map_size, MPI_DOUBLE, MPI_MAX, MASTER_NODE, MPI_COMM_WORLD);
+MPI_Reduce(n_calls, n_calls_red, map_size, MPI_INT, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm());
+MPI_Reduce(l_tot, l_tot_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm());
+MPI_Reduce(l_avg, l_avg_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm());
+MPI_Reduce(l_min, l_min_red, map_size, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm());
+MPI_Reduce(l_max, l_max_red, map_size, MPI_DOUBLE, MPI_MAX, MASTER_NODE, SU2_MPI::GetComm());
#else
memcpy(n_calls_red, n_calls, map_size*sizeof(int));
memcpy(l_tot_red, l_tot, map_size*sizeof(double));
@@ -9437,8 +9434,8 @@ void CConfig::GEMMProfilingCSV(void) {
/* Parallel executable. The profiling data must be sent to the master node.
First determine the rank and size. */
int size;
-SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
-SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
+SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
+SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);

/* Check for the master node. */
if(rank == MASTER_NODE) {
@@ -9449,7 +9446,7 @@ void CConfig::GEMMProfilingCSV(void) {
/* Block until a message from this processor arrives. Determine
the number of entries in the receive buffers. */
SU2_MPI::Status status;
-SU2_MPI::Probe(proc, 0, MPI_COMM_WORLD, &status);
+SU2_MPI::Probe(proc, 0, SU2_MPI::GetComm(), &status);

int nEntries;
SU2_MPI::Get_count(&status, MPI_LONG, &nEntries);
@@ -9463,15 +9460,15 @@
vector<long> recvBufMNK(3*nEntries);

SU2_MPI::Recv(recvBufNCalls.data(), recvBufNCalls.size(),
-MPI_LONG, proc, 0, MPI_COMM_WORLD, &status);
+MPI_LONG, proc, 0, SU2_MPI::GetComm(), &status);
SU2_MPI::Recv(recvBufTotTime.data(), recvBufTotTime.size(),
-MPI_DOUBLE, proc, 1, MPI_COMM_WORLD, &status);
+MPI_DOUBLE, proc, 1, SU2_MPI::GetComm(), &status);
SU2_MPI::Recv(recvBufMinTime.data(), recvBufMinTime.size(),
-MPI_DOUBLE, proc, 2, MPI_COMM_WORLD, &status);
+MPI_DOUBLE, proc, 2, SU2_MPI::GetComm(), &status);
SU2_MPI::Recv(recvBufMaxTime.data(), recvBufMaxTime.size(),
-MPI_DOUBLE, proc, 3, MPI_COMM_WORLD, &status);
+MPI_DOUBLE, proc, 3, SU2_MPI::GetComm(), &status);
SU2_MPI::Recv(recvBufMNK.data(), recvBufMNK.size(),
-MPI_LONG, proc, 4, MPI_COMM_WORLD, &status);
+MPI_LONG, proc, 4, SU2_MPI::GetComm(), &status);

/* Loop over the number of entries. */
for(int i=0; i<nEntries; ++i) {
@@ -9520,15 +9517,15 @@

/* Send the data to the master node using blocking sends. */
SU2_MPI::Send(GEMM_Profile_NCalls.data(), GEMM_Profile_NCalls.size(),
-MPI_LONG, MASTER_NODE, 0, MPI_COMM_WORLD);
+MPI_LONG, MASTER_NODE, 0, SU2_MPI::GetComm());
SU2_MPI::Send(GEMM_Profile_TotTime.data(), GEMM_Profile_TotTime.size(),
-MPI_DOUBLE, MASTER_NODE, 1, MPI_COMM_WORLD);
+MPI_DOUBLE, MASTER_NODE, 1, SU2_MPI::GetComm());
SU2_MPI::Send(GEMM_Profile_MinTime.data(), GEMM_Profile_MinTime.size(),
-MPI_DOUBLE, MASTER_NODE, 2, MPI_COMM_WORLD);
+MPI_DOUBLE, MASTER_NODE, 2, SU2_MPI::GetComm());
SU2_MPI::Send(GEMM_Profile_MaxTime.data(), GEMM_Profile_MaxTime.size(),
-MPI_DOUBLE, MASTER_NODE, 3, MPI_COMM_WORLD);
+MPI_DOUBLE, MASTER_NODE, 3, SU2_MPI::GetComm());
SU2_MPI::Send(sendBufMNK.data(), sendBufMNK.size(),
-MPI_LONG, MASTER_NODE, 4, MPI_COMM_WORLD);
+MPI_LONG, MASTER_NODE, 4, SU2_MPI::GetComm());
}

#endif
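The receive side of this function relies on probing for a message before allocating its buffer. Below is a minimal sketch of that probe/count/receive pattern in plain MPI; the helper name, tag handling, and use of MPI_LONG payloads are illustrative assumptions rather than SU2 API.

```cpp
#include <mpi.h>
#include <vector>

// Illustrative helper (not SU2 API): the master probes a worker's message, sizes the
// receive buffer from the probed length, and only then posts the matching receive.
std::vector<long> GatherFromWorker(int proc, int tag, MPI_Comm comm) {
  MPI_Status status;
  MPI_Probe(proc, tag, comm, &status);          // block until the worker's message arrives
  int nEntries = 0;
  MPI_Get_count(&status, MPI_LONG, &nEntries);  // number of entries in that message
  std::vector<long> buf(nEntries);
  MPI_Recv(buf.data(), nEntries, MPI_LONG, proc, tag, comm, &status);
  return buf;
}
```

The worker side is just the matching blocking send with the same tag, as in the hunk above.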
22 changes: 11 additions & 11 deletions Common/src/adt/CADTElemClass.cpp
@@ -75,14 +75,14 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,
/*--- First determine the number of points per rank and make them
available to all ranks. ---*/
int rank, size;
-SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
-SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
+SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
+SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);

vector<int> recvCounts(size), displs(size);
int sizeLocal = (int) val_coor.size();

SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1,
-MPI_INT, MPI_COMM_WORLD);
+MPI_INT, SU2_MPI::GetComm());
displs[0] = 0;
for(int i=1; i<size; ++i) displs[i] = displs[i-1] + recvCounts[i-1];

@@ -98,14 +98,14 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,

coorPoints.resize(sizeGlobal);
SU2_MPI::Allgatherv(val_coor.data(), sizeLocal, MPI_DOUBLE, coorPoints.data(),
-recvCounts.data(), displs.data(), MPI_DOUBLE, MPI_COMM_WORLD);
+recvCounts.data(), displs.data(), MPI_DOUBLE, SU2_MPI::GetComm());

/*--- Determine the number of elements per rank and make them
available to all ranks. ---*/
sizeLocal = (int) val_VTKElem.size();

SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1,
-MPI_INT, MPI_COMM_WORLD);
+MPI_INT, SU2_MPI::GetComm());
displs[0] = 0;
for(int i=1; i<size; ++i) displs[i] = displs[i-1] + recvCounts[i-1];

@@ -118,13 +118,13 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,
localElemIDs.resize(sizeGlobal);

SU2_MPI::Allgatherv(val_VTKElem.data(), sizeLocal, MPI_UNSIGNED_SHORT, elemVTK_Type.data(),
-recvCounts.data(), displs.data(), MPI_UNSIGNED_SHORT, MPI_COMM_WORLD);
+recvCounts.data(), displs.data(), MPI_UNSIGNED_SHORT, SU2_MPI::GetComm());

SU2_MPI::Allgatherv(val_markerID.data(), sizeLocal, MPI_UNSIGNED_SHORT, localMarkers.data(),
-recvCounts.data(), displs.data(), MPI_UNSIGNED_SHORT, MPI_COMM_WORLD);
+recvCounts.data(), displs.data(), MPI_UNSIGNED_SHORT, SU2_MPI::GetComm());

SU2_MPI::Allgatherv(val_elemID.data(), sizeLocal, MPI_UNSIGNED_LONG, localElemIDs.data(),
-recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG, SU2_MPI::GetComm());

/*--- Create the content of ranksOfElems, which stores the original ranks
where the elements come from. ---*/
@@ -140,7 +140,7 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,
sizeLocal = (int) val_connElem.size();

SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1,
-MPI_INT, MPI_COMM_WORLD);
+MPI_INT, SU2_MPI::GetComm());
displs[0] = 0;
for(int i=1; i<size; ++i) displs[i] = displs[i-1] + recvCounts[i-1];

@@ -150,14 +150,14 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,
elemConns.resize(sizeGlobal);

SU2_MPI::Allgatherv(val_connElem.data(), sizeLocal, MPI_UNSIGNED_LONG, elemConns.data(),
-recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
}
else {

/*--- A local tree must be built. Copy the data from the arguments into the
member variables and set the ranks to the rank of this processor. ---*/
int rank;
-SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
+SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);

coorPoints = val_coor;
elemConns = val_connElem;
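The gathers in this constructor all follow the same count/displacement recipe: Allgather the per-rank sizes, prefix-sum them into displacements, then Allgatherv the payload. Below is a condensed sketch of that recipe in plain MPI; AllgatherVariable is an illustrative helper name, not SU2 code.

```cpp
#include <mpi.h>
#include <vector>

// Illustrative helper (not SU2 API): gather per-rank arrays of different lengths
// onto every rank, using the counts/displacements pattern shown in this file.
std::vector<double> AllgatherVariable(const std::vector<double>& local, MPI_Comm comm) {
  int size = 0;
  MPI_Comm_size(comm, &size);

  std::vector<int> recvCounts(size), displs(size);
  int sizeLocal = static_cast<int>(local.size());
  MPI_Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1, MPI_INT, comm);

  displs[0] = 0;
  for (int i = 1; i < size; ++i) displs[i] = displs[i-1] + recvCounts[i-1];  // prefix sum

  std::vector<double> global(displs[size-1] + recvCounts[size-1]);
  MPI_Allgatherv(local.data(), sizeLocal, MPI_DOUBLE, global.data(),
                 recvCounts.data(), displs.data(), MPI_DOUBLE, comm);
  return global;
}
```

The same recipe repeats in CADTPointsOnlyClass.cpp below, only with different payload types.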
14 changes: 7 additions & 7 deletions Common/src/adt/CADTPointsOnlyClass.cpp
@@ -52,14 +52,14 @@ CADTPointsOnlyClass::CADTPointsOnlyClass(unsigned short nDim,
First determine the number of points per rank and store them in such
a way that the info can be used directly in Allgatherv. ---*/
int rank, size;
-SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
-SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
+SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
+SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);

vector<int> recvCounts(size), displs(size);
int sizeLocal = (int) nPoints;

SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1,
-MPI_INT, MPI_COMM_WORLD);
+MPI_INT, SU2_MPI::GetComm());
displs[0] = 0;
for(int i=1; i<size; ++i) displs[i] = displs[i-1] + recvCounts[i-1];

@@ -69,26 +69,26 @@ CADTPointsOnlyClass::CADTPointsOnlyClass(unsigned short nDim,
localPointIDs.resize(sizeGlobal);
SU2_MPI::Allgatherv(pointID, sizeLocal, MPI_UNSIGNED_LONG, localPointIDs.data(),
recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG,
-MPI_COMM_WORLD);
+SU2_MPI::GetComm());

ranksOfPoints.resize(sizeGlobal);
vector<int> rankLocal(sizeLocal, rank);
SU2_MPI::Allgatherv(rankLocal.data(), sizeLocal, MPI_INT, ranksOfPoints.data(),
-recvCounts.data(), displs.data(), MPI_INT, MPI_COMM_WORLD);
+recvCounts.data(), displs.data(), MPI_INT, SU2_MPI::GetComm());

/*--- Gather the coordinates of the points on all ranks. ---*/
for(int i=0; i<size; ++i) {recvCounts[i] *= nDim; displs[i] *= nDim;}

coorPoints.resize(nDim*sizeGlobal);
SU2_MPI::Allgatherv(coor, nDim*sizeLocal, MPI_DOUBLE, coorPoints.data(),
-recvCounts.data(), displs.data(), MPI_DOUBLE, MPI_COMM_WORLD);
+recvCounts.data(), displs.data(), MPI_DOUBLE, SU2_MPI::GetComm());
}
else {

/*--- A local tree must be built. Copy the coordinates and point IDs and
set the ranks to the rank of this processor. ---*/
int rank;
-SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
+SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);

coorPoints.assign(coor, coor + nDim*nPoints);
localPointIDs.assign(pointID, pointID + nPoints);
Expand Down