From f3ce5bb7ac5939b99f0f433574a3abc2676d8477 Mon Sep 17 00:00:00 2001
From: Alessandro Gastaldi
Date: Fri, 29 Jan 2021 17:05:46 +0100
Subject: [PATCH 1/2] Replace global MPI_COMM_WORLD with SU2_MPI::GetComm()
 where appropriate

---
 .../include/linear_algebra/CPastixWrapper.hpp | 2 +-
 Common/include/linear_algebra/CSysVector.hpp | 2 +-
 .../toolboxes/CQuasiNewtonInvLeastSquares.hpp | 4 +-
 Common/src/CConfig.cpp | 42 ++--
 Common/src/adt/CADTElemClass.cpp | 22 +-
 Common/src/adt/CADTPointsOnlyClass.cpp | 14 +-
 Common/src/fem/fem_geometry_structure.cpp | 94 ++++----
 .../src/fem/geometry_structure_fem_part.cpp | 154 ++++++------
 Common/src/geometry/CGeometry.cpp | 72 +++---
 Common/src/geometry/CMultiGridGeometry.cpp | 8 +-
 Common/src/geometry/CPhysicalGeometry.cpp | 224 +++++++++---------
 .../meshreader/CCGNSMeshReaderFVM.cpp | 30 +--
 Common/src/graph_coloring_structure.cpp | 16 +-
 Common/src/grid_movement/CSurfaceMovement.cpp | 36 +--
 .../src/grid_movement/CVolumetricMovement.cpp | 12 +-
 .../interface_interpolation/CInterpolator.cpp | 62 ++---
 .../CIsoparametric.cpp | 6 +-
 .../src/interface_interpolation/CMirror.cpp | 18 +-
 .../CNearestNeighbor.cpp | 6 +-
 .../CRadialBasisFunction.cpp | 16 +-
 Common/src/linear_algebra/CPastixWrapper.cpp | 4 +-
 Common/src/linear_algebra/CSysMatrix.cpp | 4 +-
 SU2_CFD/include/limiters/CLimiterDetails.hpp | 4 +-
 .../include/solvers/CFVMFlowSolverBase.inl | 16 +-
 SU2_CFD/src/CMarkerProfileReaderFVM.cpp | 12 +-
 SU2_CFD/src/definition_structure.cpp | 16 +-
 .../src/drivers/CDiscAdjMultizoneDriver.cpp | 2 +-
 .../src/drivers/CDiscAdjSinglezoneDriver.cpp | 2 +-
 SU2_CFD/src/drivers/CMultizoneDriver.cpp | 2 +-
 SU2_CFD/src/drivers/CSinglezoneDriver.cpp | 2 +-
 SU2_CFD/src/integration/CIntegration.cpp | 6 +-
 SU2_CFD/src/interfaces/CInterface.cpp | 28 +--
 SU2_CFD/src/output/CFlowOutput.cpp | 30 +--
 SU2_CFD/src/output/COutput.cpp | 6 +-
 .../src/output/filewriter/CCSVFileWriter.cpp | 8 +-
 .../src/output/filewriter/CFEMDataSorter.cpp | 2 +-
 .../src/output/filewriter/CFVMDataSorter.cpp | 10 +-
 .../output/filewriter/CParallelDataSorter.cpp | 18 +-
 .../output/filewriter/CParallelFileWriter.cpp | 6 +-
 .../output/filewriter/CParaviewFileWriter.cpp | 26 +-
 .../src/output/filewriter/CSTLFileWriter.cpp | 12 +-
 .../src/output/filewriter/CSU2FileWriter.cpp | 2 +-
 .../output/filewriter/CSU2MeshFileWriter.cpp | 6 +-
 .../filewriter/CSurfaceFEMDataSorter.cpp | 16 +-
 .../filewriter/CSurfaceFVMDataSorter.cpp | 40 ++--
 .../filewriter/CTecplotBinaryFileWriter.cpp | 24 +-
 .../output/filewriter/CTecplotFileWriter.cpp | 6 +-
 .../src/output/output_structure_legacy.cpp | 86 +++----
 SU2_CFD/src/python_wrapper_structure.cpp | 6 +-
 SU2_CFD/src/solvers/CAdjEulerSolver.cpp | 50 ++--
 SU2_CFD/src/solvers/CAdjNSSolver.cpp | 14 +-
 SU2_CFD/src/solvers/CAdjTurbSolver.cpp | 2 +-
 SU2_CFD/src/solvers/CBaselineSolver.cpp | 10 +-
 SU2_CFD/src/solvers/CBaselineSolver_FEM.cpp | 12 +-
 SU2_CFD/src/solvers/CDiscAdjFEASolver.cpp | 12 +-
 SU2_CFD/src/solvers/CDiscAdjSolver.cpp | 24 +-
 SU2_CFD/src/solvers/CEulerSolver.cpp | 126 +++++-----
 SU2_CFD/src/solvers/CFEASolver.cpp | 34 +--
 SU2_CFD/src/solvers/CFEM_DG_EulerSolver.cpp | 66 +++---
 SU2_CFD/src/solvers/CFEM_DG_NSSolver.cpp | 8 +-
 SU2_CFD/src/solvers/CHeatSolver.cpp | 34 +--
 SU2_CFD/src/solvers/CIncEulerSolver.cpp | 28 +--
 SU2_CFD/src/solvers/CIncNSSolver.cpp | 22 +-
 SU2_CFD/src/solvers/CMeshSolver.cpp | 10 +-
 SU2_CFD/src/solvers/CNEMOEulerSolver.cpp | 14 +-
 SU2_CFD/src/solvers/CNEMONSSolver.cpp | 4 +-
 SU2_CFD/src/solvers/CNSSolver.cpp | 8 +-
SU2_CFD/src/solvers/CRadP1Solver.cpp | 4 +- SU2_CFD/src/solvers/CSolver.cpp | 38 +-- 69 files changed, 881 insertions(+), 881 deletions(-) diff --git a/Common/include/linear_algebra/CPastixWrapper.hpp b/Common/include/linear_algebra/CPastixWrapper.hpp index 3f7a87ecf5f6..bb42f4a4f0ba 100644 --- a/Common/include/linear_algebra/CPastixWrapper.hpp +++ b/Common/include/linear_algebra/CPastixWrapper.hpp @@ -93,7 +93,7 @@ class CPastixWrapper * \brief Run the external solver for the task it is currently setup to execute. */ void Run() { - dpastix(&state, MPI_COMM_WORLD, nCols, colptr.data(), rowidx.data(), values.data(), + dpastix(&state, SU2_MPI::GetComm(), nCols, colptr.data(), rowidx.data(), values.data(), loc2glb.data(), perm.data(), NULL, workvec.data(), 1, iparm, dparm); } diff --git a/Common/include/linear_algebra/CSysVector.hpp b/Common/include/linear_algebra/CSysVector.hpp index 2896b8ea8784..405f8c030380 100644 --- a/Common/include/linear_algebra/CSysVector.hpp +++ b/Common/include/linear_algebra/CSysVector.hpp @@ -315,7 +315,7 @@ class CSysVector : public VecExpr::CVecExpr, ScalarType> SU2_OMP_MASTER { sum = dotRes; const auto mpi_type = (sizeof(ScalarType) < sizeof(double)) ? MPI_FLOAT : MPI_DOUBLE; - SelectMPIWrapper::W::Allreduce(&sum, &dotRes, 1, mpi_type, MPI_SUM, MPI_COMM_WORLD); + SelectMPIWrapper::W::Allreduce(&sum, &dotRes, 1, mpi_type, MPI_SUM, SU2_MPI::GetComm()); } } #endif diff --git a/Common/include/toolboxes/CQuasiNewtonInvLeastSquares.hpp b/Common/include/toolboxes/CQuasiNewtonInvLeastSquares.hpp index cbd5f192a811..645616b62740 100644 --- a/Common/include/toolboxes/CQuasiNewtonInvLeastSquares.hpp +++ b/Common/include/toolboxes/CQuasiNewtonInvLeastSquares.hpp @@ -95,11 +95,11 @@ class CQuasiNewtonInvLeastSquares { su2vector tmp(mat.size()); MPI_Wrapper::Allreduce(mat.data(), tmp.data(), iSample*(iSample+1)/2, - type, MPI_SUM, MPI_COMM_WORLD); + type, MPI_SUM, SU2_MPI::GetComm()); mat = std::move(tmp); MPI_Wrapper::Allreduce(rhs.data(), sol.data(), iSample, - type, MPI_SUM, MPI_COMM_WORLD); + type, MPI_SUM, SU2_MPI::GetComm()); std::swap(rhs, sol); } } diff --git a/Common/src/CConfig.cpp b/Common/src/CConfig.cpp index 6dabd4d63115..2018f936cbac 100644 --- a/Common/src/CConfig.cpp +++ b/Common/src/CConfig.cpp @@ -5011,7 +5011,7 @@ void CConfig::SetMarkers(unsigned short val_software) { #ifdef HAVE_MPI if (val_software != SU2_MSH) - SU2_MPI::Comm_size(MPI_COMM_WORLD, &size); + SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size); #endif /*--- Compute the total number of markers in the config file ---*/ @@ -9334,8 +9334,8 @@ void CConfig::SetProfilingCSV(void) { int rank = MASTER_NODE; int size = SINGLE_NODE; #ifdef HAVE_MPI - SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank); - SU2_MPI::Comm_size(MPI_COMM_WORLD, &size); + SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank); + SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size); #endif /*--- Each rank has the same stack trace, so the they have the same @@ -9419,11 +9419,11 @@ void CConfig::SetProfilingCSV(void) { } #ifdef HAVE_MPI - MPI_Reduce(n_calls, n_calls_red, map_size, MPI_INT, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); - MPI_Reduce(l_tot, l_tot_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); - MPI_Reduce(l_avg, l_avg_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); - MPI_Reduce(l_min, l_min_red, map_size, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - MPI_Reduce(l_max, l_max_red, map_size, MPI_DOUBLE, MPI_MAX, MASTER_NODE, MPI_COMM_WORLD); + MPI_Reduce(n_calls, n_calls_red, map_size, MPI_INT, MPI_SUM, 
MASTER_NODE, SU2_MPI::GetComm()); + MPI_Reduce(l_tot, l_tot_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); + MPI_Reduce(l_avg, l_avg_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); + MPI_Reduce(l_min, l_min_red, map_size, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm()); + MPI_Reduce(l_max, l_max_red, map_size, MPI_DOUBLE, MPI_MAX, MASTER_NODE, SU2_MPI::GetComm()); #else memcpy(n_calls_red, n_calls, map_size*sizeof(int)); memcpy(l_tot_red, l_tot, map_size*sizeof(double)); @@ -9557,8 +9557,8 @@ void CConfig::GEMMProfilingCSV(void) { /* Parallel executable. The profiling data must be sent to the master node. First determine the rank and size. */ int size; - SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank); - SU2_MPI::Comm_size(MPI_COMM_WORLD, &size); + SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank); + SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size); /* Check for the master node. */ if(rank == MASTER_NODE) { @@ -9569,7 +9569,7 @@ void CConfig::GEMMProfilingCSV(void) { /* Block until a message from this processor arrives. Determine the number of entries in the receive buffers. */ SU2_MPI::Status status; - SU2_MPI::Probe(proc, 0, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(proc, 0, SU2_MPI::GetComm(), &status); int nEntries; SU2_MPI::Get_count(&status, MPI_LONG, &nEntries); @@ -9583,15 +9583,15 @@ void CConfig::GEMMProfilingCSV(void) { vector recvBufMNK(3*nEntries); SU2_MPI::Recv(recvBufNCalls.data(), recvBufNCalls.size(), - MPI_LONG, proc, 0, MPI_COMM_WORLD, &status); + MPI_LONG, proc, 0, SU2_MPI::GetComm(), &status); SU2_MPI::Recv(recvBufTotTime.data(), recvBufTotTime.size(), - MPI_DOUBLE, proc, 1, MPI_COMM_WORLD, &status); + MPI_DOUBLE, proc, 1, SU2_MPI::GetComm(), &status); SU2_MPI::Recv(recvBufMinTime.data(), recvBufMinTime.size(), - MPI_DOUBLE, proc, 2, MPI_COMM_WORLD, &status); + MPI_DOUBLE, proc, 2, SU2_MPI::GetComm(), &status); SU2_MPI::Recv(recvBufMaxTime.data(), recvBufMaxTime.size(), - MPI_DOUBLE, proc, 3, MPI_COMM_WORLD, &status); + MPI_DOUBLE, proc, 3, SU2_MPI::GetComm(), &status); SU2_MPI::Recv(recvBufMNK.data(), recvBufMNK.size(), - MPI_LONG, proc, 4, MPI_COMM_WORLD, &status); + MPI_LONG, proc, 4, SU2_MPI::GetComm(), &status); /* Loop over the number of entries. */ for(int i=0; i recvCounts(size), displs(size); int sizeLocal = (int) val_coor.size(); SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1, - MPI_INT, MPI_COMM_WORLD); + MPI_INT, SU2_MPI::GetComm()); displs[0] = 0; for(int i=1; i recvCounts(size), displs(size); int sizeLocal = (int) nPoints; SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1, - MPI_INT, MPI_COMM_WORLD); + MPI_INT, SU2_MPI::GetComm()); displs[0] = 0; for(int i=1; i rankLocal(sizeLocal, rank); SU2_MPI::Allgatherv(rankLocal.data(), sizeLocal, MPI_INT, ranksOfPoints.data(), - recvCounts.data(), displs.data(), MPI_INT, MPI_COMM_WORLD); + recvCounts.data(), displs.data(), MPI_INT, SU2_MPI::GetComm()); /*--- Gather the coordinates of the points on all ranks. ---*/ for(int i=0; i sizeRecv(size, 1); SU2_MPI::Reduce_scatter(sendToRank.data(), &nRankRecv, sizeRecv.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); #endif /*--- Loop over the local elements to fill the communication buffers with element data. 
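For each destination rank three buffers are filled (shorts, longs and doubles); they are sent below as three separate messages with consecutive tags so that the receiver can match them.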
---*/ @@ -468,11 +468,11 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { int dest = MI->first; SU2_MPI::Isend(shortSendBuf[i].data(), shortSendBuf[i].size(), MPI_SHORT, - dest, dest, MPI_COMM_WORLD, &commReqs[3*i]); + dest, dest, SU2_MPI::GetComm(), &commReqs[3*i]); SU2_MPI::Isend(longSendBuf[i].data(), longSendBuf[i].size(), MPI_LONG, - dest, dest+1, MPI_COMM_WORLD, &commReqs[3*i+1]); + dest, dest+1, SU2_MPI::GetComm(), &commReqs[3*i+1]); SU2_MPI::Isend(doubleSendBuf[i].data(), doubleSendBuf[i].size(), MPI_DOUBLE, - dest, dest+2, MPI_COMM_WORLD, &commReqs[3*i+2]); + dest, dest+2, SU2_MPI::GetComm(), &commReqs[3*i+2]); } /* Loop over the number of ranks from which I receive data. */ @@ -481,7 +481,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Block until a message with shorts arrives from any processor. Determine the source and the size of the message. */ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank, SU2_MPI::GetComm(), &status); int source = status.MPI_SOURCE; int sizeMess; @@ -490,24 +490,24 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Allocate the memory for the short receive buffer and receive the message. */ shortRecvBuf[i].resize(sizeMess); SU2_MPI::Recv(shortRecvBuf[i].data(), sizeMess, MPI_SHORT, - source, rank, MPI_COMM_WORLD, &status); + source, rank, SU2_MPI::GetComm(), &status); /* Block until the corresponding message with longs arrives, determine its size, allocate the memory and receive the message. */ - SU2_MPI::Probe(source, rank+1, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(source, rank+1, SU2_MPI::GetComm(), &status); SU2_MPI::Get_count(&status, MPI_LONG, &sizeMess); longRecvBuf[i].resize(sizeMess); SU2_MPI::Recv(longRecvBuf[i].data(), sizeMess, MPI_LONG, - source, rank+1, MPI_COMM_WORLD, &status); + source, rank+1, SU2_MPI::GetComm(), &status); /* Idem for the message with doubles. */ - SU2_MPI::Probe(source, rank+2, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(source, rank+2, SU2_MPI::GetComm(), &status); SU2_MPI::Get_count(&status, MPI_DOUBLE, &sizeMess); doubleRecvBuf[i].resize(sizeMess); SU2_MPI::Recv(doubleRecvBuf[i].data(), sizeMess, MPI_DOUBLE, - source, rank+2, MPI_COMM_WORLD, &status); + source, rank+2, SU2_MPI::GetComm(), &status); } /* Complete the non-blocking sends. */ @@ -515,7 +515,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Wild cards have been used in the communication, so synchronize the ranks to avoid problems. */ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #else @@ -701,7 +701,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { #ifdef HAVE_MPI SU2_MPI::Allreduce(&maxTimeLevelLoc, &maxTimeLevelGlob, - 1, MPI_UNSIGNED_SHORT, MPI_MAX, MPI_COMM_WORLD); + 1, MPI_UNSIGNED_SHORT, MPI_MAX, SU2_MPI::GetComm()); #endif const unsigned short nTimeLevels = maxTimeLevelGlob+1; @@ -763,7 +763,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { #ifdef HAVE_MPI SU2_MPI::Reduce_scatter(sendToRank.data(), &nRankRecv, sizeRecv.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); #endif /* Loop over the local halo elements to fill the communication buffers. 
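Only the long buffers are needed in this round; each is sent as a single MPI_LONG message to its destination rank.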
*/ @@ -808,7 +808,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { for(int i=0; ifirst; SU2_MPI::Isend(longSendBuf[i].data(), longSendBuf[i].size(), MPI_LONG, - dest, dest, MPI_COMM_WORLD, &commReqs[i]); + dest, dest, SU2_MPI::GetComm(), &commReqs[i]); } /* Loop over the number of ranks from which I receive data. */ @@ -817,7 +817,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Block until a message with longs arrives from any processor. Determine the source and the size of the message and receive it. */ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank, SU2_MPI::GetComm(), &status); sourceRank[i] = status.MPI_SOURCE; int sizeMess; @@ -825,7 +825,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { longSecondRecvBuf[i].resize(sizeMess); SU2_MPI::Recv(longSecondRecvBuf[i].data(), sizeMess, MPI_LONG, - sourceRank[i], rank, MPI_COMM_WORLD, &status); + sourceRank[i], rank, SU2_MPI::GetComm(), &status); } /* Complete the non-blocking sends. */ @@ -895,7 +895,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { #ifdef HAVE_MPI int dest = sourceRank[i]; SU2_MPI::Isend(longSendBuf[i].data(), longSendBuf[i].size(), MPI_LONG, - dest, dest+1, MPI_COMM_WORLD, &commReqs[i]); + dest, dest+1, SU2_MPI::GetComm(), &commReqs[i]); #endif } @@ -914,7 +914,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Block until a message with longs arrives from any processor. Determine the source and the size of the message. */ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank+1, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank+1, SU2_MPI::GetComm(), &status); int source = status.MPI_SOURCE; int sizeMess; @@ -923,13 +923,13 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Allocate the memory for the long receive buffer and receive the message. */ longSecondRecvBuf[i].resize(sizeMess); SU2_MPI::Recv(longSecondRecvBuf[i].data(), sizeMess, MPI_LONG, - source, rank+1, MPI_COMM_WORLD, &status); + source, rank+1, SU2_MPI::GetComm(), &status); } /* Complete the non-blocking sends and synchronize the ranks, because wild cards have been used. */ SU2_MPI::Waitall(nRankRecv, commReqs.data(), MPI_STATUSES_IGNORE); - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #else @@ -1000,7 +1000,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { #ifdef HAVE_MPI SU2_MPI::Reduce_scatter(sendToRank.data(), &nRankRecv, sizeRecv.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); #endif /* Copy the data to be sent to the send buffers. */ @@ -1030,7 +1030,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { for(int i=0; ifirst; SU2_MPI::Isend(longSendBuf[i].data(), longSendBuf[i].size(), MPI_LONG, - dest, dest, MPI_COMM_WORLD, &commReqs[i]); + dest, dest, SU2_MPI::GetComm(), &commReqs[i]); } /* Resize the vector to store the ranks from which the message came. */ @@ -1042,7 +1042,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Block until a message with longs arrives from any processor. Determine the source and the size of the message and receive it. 
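As MPI_ANY_SOURCE is used here, the ranks are synchronized with a barrier once this round completes.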
*/ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank, SU2_MPI::GetComm(), &status); sourceRank[i] = status.MPI_SOURCE; int sizeMess; @@ -1050,13 +1050,13 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { longSecondRecvBuf[i].resize(sizeMess); SU2_MPI::Recv(longSecondRecvBuf[i].data(), sizeMess, MPI_LONG, - sourceRank[i], rank, MPI_COMM_WORLD, &status); + sourceRank[i], rank, SU2_MPI::GetComm(), &status); } /* Complete the non-blocking sends and synchronize the ranks, because wild cards have been used. */ SU2_MPI::Waitall(nRankSend, commReqs.data(), MPI_STATUSES_IGNORE); - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #else @@ -1215,7 +1215,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { unsigned long nRanksTooManyPartChunks = tooManyPartChunksLoc; #ifdef HAVE_MPI SU2_MPI::Reduce(&tooManyPartChunksLoc, &nRanksTooManyPartChunks, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); #endif if((rank == MASTER_NODE) && (nRanksTooManyPartChunks != 0) && (size > 1)) { @@ -1404,7 +1404,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { unsigned long nEmptyPartitions = 0; SU2_MPI::Reduce(&thisPartitionEmpty, &nEmptyPartitions, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); if(rank == MASTER_NODE && nEmptyPartitions) { cout << endl << " WARNING" << endl; @@ -1671,11 +1671,11 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { #ifdef HAVE_MPI int dest = sourceRank[i]; SU2_MPI::Isend(shortSendBuf[i].data(), shortSendBuf[i].size(), MPI_SHORT, - dest, dest+1, MPI_COMM_WORLD, &commReqs[3*i]); + dest, dest+1, SU2_MPI::GetComm(), &commReqs[3*i]); SU2_MPI::Isend(longSendBuf[i].data(), longSendBuf[i].size(), MPI_LONG, - dest, dest+2, MPI_COMM_WORLD, &commReqs[3*i+1]); + dest, dest+2, SU2_MPI::GetComm(), &commReqs[3*i+1]); SU2_MPI::Isend(doubleSendBuf[i].data(), doubleSendBuf[i].size(), MPI_DOUBLE, - dest, dest+3, MPI_COMM_WORLD, &commReqs[3*i+2]); + dest, dest+3, SU2_MPI::GetComm(), &commReqs[3*i+2]); #endif } @@ -1700,7 +1700,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Block until a message with shorts arrives from any processor. Determine the source and the size of the message. */ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank+1, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank+1, SU2_MPI::GetComm(), &status); sourceRank[i] = status.MPI_SOURCE; int sizeMess; @@ -1709,24 +1709,24 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Allocate the memory for the short receive buffer and receive the message. */ shortRecvBuf[i].resize(sizeMess); SU2_MPI::Recv(shortRecvBuf[i].data(), sizeMess, MPI_SHORT, - sourceRank[i], rank+1, MPI_COMM_WORLD, &status); + sourceRank[i], rank+1, SU2_MPI::GetComm(), &status); /* Block until the corresponding message with longs arrives, determine its size, allocate the memory and receive the message. */ - SU2_MPI::Probe(sourceRank[i], rank+2, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(sourceRank[i], rank+2, SU2_MPI::GetComm(), &status); SU2_MPI::Get_count(&status, MPI_LONG, &sizeMess); longRecvBuf[i].resize(sizeMess); SU2_MPI::Recv(longRecvBuf[i].data(), sizeMess, MPI_LONG, - sourceRank[i], rank+2, MPI_COMM_WORLD, &status); + sourceRank[i], rank+2, SU2_MPI::GetComm(), &status); /* Idem for the message with doubles. 
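The probe/count/receive sequence is the same as above, only the message tag and the data type differ.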
*/ - SU2_MPI::Probe(sourceRank[i], rank+3, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(sourceRank[i], rank+3, SU2_MPI::GetComm(), &status); SU2_MPI::Get_count(&status, MPI_DOUBLE, &sizeMess); doubleRecvBuf[i].resize(sizeMess); SU2_MPI::Recv(doubleRecvBuf[i].data(), sizeMess, MPI_DOUBLE, - sourceRank[i], rank+3, MPI_COMM_WORLD, &status); + sourceRank[i], rank+3, SU2_MPI::GetComm(), &status); } /* Complete the non-blocking sends. */ @@ -1734,7 +1734,7 @@ CMeshFEM::CMeshFEM(CGeometry *geometry, CConfig *config) { /* Wild cards have been used in the communication, so synchronize the ranks to avoid problems. */ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #else @@ -2420,7 +2420,7 @@ void CMeshFEM::SetPositive_ZArea(CConfig *config) { #ifdef HAVE_MPI su2double locArea = PositiveZArea; - SU2_MPI::Allreduce(&locArea, &PositiveZArea, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&locArea, &PositiveZArea, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #endif /*---------------------------------------------------------------------------*/ @@ -3504,7 +3504,7 @@ void CMeshFEM_DG::SetSendReceive(const CConfig *config) { vector sizeReduce(size, 1); SU2_MPI::Reduce_scatter(recvFromRank.data(), &nRankSend, sizeReduce.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); /* Resize ranksSend and the first index of entitiesSend to the number of ranks to which this rank has to send data. */ @@ -3517,7 +3517,7 @@ void CMeshFEM_DG::SetSendReceive(const CConfig *config) { for(unsigned long i=0; i nDOFsPerRank(size); SU2_MPI::Allgather(&nDOFsLoc, 1, MPI_UNSIGNED_LONG, nDOFsPerRank.data(), 1, - MPI_UNSIGNED_LONG, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); /* Determine the offset for the DOFs on this rank. */ unsigned long offsetRank = 0; @@ -1184,7 +1184,7 @@ void CPhysicalGeometry::Read_CGNS_Format_Parallel_FEM(CConfig *config, int nRankRecv; vector sizeRecv(size, 1); SU2_MPI::Reduce_scatter(sendToRank.data(), &nRankRecv, sizeRecv.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); /*--- Send out the messages with the global node numbers. Use nonblocking sends to avoid deadlock. ---*/ @@ -1193,7 +1193,7 @@ void CPhysicalGeometry::Read_CGNS_Format_Parallel_FEM(CConfig *config, for(int i=0; i faceRecvBuf(sizeMess); SU2_MPI::Recv(faceRecvBuf.data(), faceRecvBuf.size(), MPI_UNSIGNED_LONG, - source, rank+4, MPI_COMM_WORLD, &status); + source, rank+4, SU2_MPI::GetComm(), &status); /* Loop to extract the data from the receive buffer. */ int ii = 0; @@ -1432,7 +1432,7 @@ void CPhysicalGeometry::Read_CGNS_Format_Parallel_FEM(CConfig *config, /* Complete the non-blocking sends. Afterwards, synchronize the ranks, because wild cards have been used. */ SU2_MPI::Waitall(sendReqs.size(), sendReqs.data(), MPI_STATUSES_IGNORE); - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif @@ -1681,7 +1681,7 @@ void CPhysicalGeometry::Read_CGNS_Format_Parallel_FEM(CConfig *config, } SU2_MPI::Reduce_scatter(sendToRank.data(), &nRankRecv, sizeRecv.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); /*--- Send the messages using non-blocking sends to avoid deadlock. 
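The requests are collected in sendReqs and completed with Waitall after all receives have been carried out.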
---*/ sendReqs.resize(nRankSend); @@ -1689,7 +1689,7 @@ void CPhysicalGeometry::Read_CGNS_Format_Parallel_FEM(CConfig *config, for(int i=0; i boundElemRecvBuf(sizeMess); SU2_MPI::Recv(boundElemRecvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, - source, rank+5, MPI_COMM_WORLD, &status); + source, rank+5, SU2_MPI::GetComm(), &status); /* Loop to extract the data from the receive buffer. */ int ii = 0; @@ -1783,7 +1783,7 @@ void CPhysicalGeometry::Read_CGNS_Format_Parallel_FEM(CConfig *config, } SU2_MPI::Reduce_scatter(sendToRank.data(), &nRankRecv, sizeRecv.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); /*--- Send the messages using non-blocking sends to avoid deadlock. ---*/ sendReqs.resize(nRankSend); @@ -1791,7 +1791,7 @@ void CPhysicalGeometry::Read_CGNS_Format_Parallel_FEM(CConfig *config, for(int i=0; i boundElemRecvBuf(sizeMess); SU2_MPI::Recv(boundElemRecvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, - source, rank+6, MPI_COMM_WORLD, &status); + source, rank+6, SU2_MPI::GetComm(), &status); /* Loop to extract the data from the receive buffer. */ int ii = 0; @@ -1853,7 +1853,7 @@ void CPhysicalGeometry::Read_CGNS_Format_Parallel_FEM(CConfig *config, because wild cards have been used. */ SU2_MPI::Waitall(sendReqs.size(), sendReqs.data(), MPI_STATUSES_IGNORE); - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #else /*--- Sequential mode. All boundary elements read must be stored on this @@ -2059,7 +2059,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { unsigned long maxPointID; SU2_MPI::Allreduce(&maxPointIDLoc, &maxPointID, 1, MPI_UNSIGNED_LONG, - MPI_MAX, MPI_COMM_WORLD); + MPI_MAX, SU2_MPI::GetComm()); ++maxPointID; /*--- Create a vector with a linear distribution over the ranks for @@ -2126,7 +2126,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { unsigned long nMessRecv; SU2_MPI::Reduce_scatter(counter.data(), &nMessRecv, sizeRecv.data(), - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); /*--- Send the data using nonblocking sends. ---*/ vector commReqs(max(nMessSend,nMessRecv)); @@ -2137,7 +2137,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { if( nFacesComm[i] ) { unsigned long count = 9*nFacesComm[i]; SU2_MPI::Isend(&sendBufFace[indSend], count, MPI_UNSIGNED_LONG, i, i, - MPI_COMM_WORLD, &commReqs[nMessSend]); + SU2_MPI::GetComm(), &commReqs[nMessSend]); ++nMessSend; indSend += count; } @@ -2151,14 +2151,14 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { nFacesRecv[0] = 0; for(unsigned long i=0; i recvBuf(sizeMess); SU2_MPI::Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, - rankRecv[i], rank, MPI_COMM_WORLD, &status); + rankRecv[i], rank, SU2_MPI::GetComm(), &status); nFacesRecv[i+1] = nFacesRecv[i] + sizeMess/9; facesRecv.resize(nFacesRecv[i+1]); @@ -2237,7 +2237,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { unsigned long count = ii - indSend; SU2_MPI::Isend(&sendBufFace[indSend], count, MPI_UNSIGNED_LONG, rankRecv[i], - rankRecv[i]+1, MPI_COMM_WORLD, &commReqs[i]); + rankRecv[i]+1, SU2_MPI::GetComm(), &commReqs[i]); indSend = ii; } @@ -2246,13 +2246,13 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { The return data contains information about the neighboring element. 
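Each face is encoded as nine unsigned longs, and the reply carries tag rank+1 to keep it separate from the first round of messages.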
---*/ for(unsigned long i=0; i recvBuf(sizeMess); SU2_MPI::Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, - status.MPI_SOURCE, rank+1, MPI_COMM_WORLD, &status); + status.MPI_SOURCE, rank+1, SU2_MPI::GetComm(), &status); sizeMess /= 9; unsigned long jj = 0; @@ -2278,7 +2278,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { /*--- Wild cards have been used in the communication, so synchronize the ranks to avoid problems. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif @@ -2310,7 +2310,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { #ifdef HAVE_MPI SU2_MPI::Reduce(&nFacesLocOr, &nNonMatchingFaces, 1, MPI_UNSIGNED_LONG, - MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); #endif if(rank == MASTER_NODE && nNonMatchingFaces) { cout << "There are " << nNonMatchingFaces << " non-matching faces in the grid. " @@ -2568,7 +2568,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { int nRankRecv; vector sizeSend(size, 1); SU2_MPI::Reduce_scatter(sendToRank.data(), &nRankRecv, sizeSend.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); /* Send the data using non-blocking sends. */ vector sendReqs(nRankSend); @@ -2576,7 +2576,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { for(int i=0; i recvBuf(sizeMess); SU2_MPI::Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, - source, rank, MPI_COMM_WORLD, &status); + source, rank, SU2_MPI::GetComm(), &status); /* Loop over the contents of the receive buffer and update the graph accordingly. */ @@ -2610,7 +2610,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { /* Complete the non-blocking sends amd synchronize the ranks, because wild cards have been used in the above communication. */ SU2_MPI::Waitall(nRankSend, sendReqs.data(), MPI_STATUSES_IGNORE); - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif @@ -2687,7 +2687,7 @@ void CPhysicalGeometry::SetColorFEMGrid_Parallel(CConfig *config) { if (rank == MASTER_NODE) cout << "Calling ParMETIS..."; idx_t edgecut; - MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm comm = SU2_MPI::GetComm(); ParMETIS_V3_PartKway(vtxdist.data(), xadjPar.data(), adjacencyPar.data(), vwgtPar.data(), adjwgtPar.data(), &wgtflag, &numflag, &ncon, &nparts, tpwgts.data(), ubvec, options, @@ -2843,7 +2843,7 @@ void CPhysicalGeometry::DeterminePeriodicFacesFEMGrid(CConfig *co int sizeLocal = facesDonor.size(); SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1, - MPI_INT, MPI_COMM_WORLD); + MPI_INT, SU2_MPI::GetComm()); /*--- Create the data for the vector displs from the known values of recvCounts. Also determine the total size of the data. 
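displs is the exclusive prefix sum of recvCounts, i.e. the offset of each rank's contribution in the gathered buffer.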
---*/ @@ -2898,7 +2898,7 @@ void CPhysicalGeometry::DeterminePeriodicFacesFEMGrid(CConfig *co SU2_MPI::Allgatherv(longLocBuf.data(), longLocBuf.size(), MPI_UNSIGNED_LONG, longGlobBuf.data(), recvCounts.data(), displs.data(), - MPI_UNSIGNED_LONG, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); for(int i=0; i bufBoundaryElemIDGlobalSearch(nGlobalSearchPoints); SU2_MPI::Allgatherv(boundaryElemIDGlobalSearch.data(), nLocalSearchPoints, MPI_UNSIGNED_LONG, bufBoundaryElemIDGlobalSearch.data(), recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG, - MPI_COMM_WORLD); + SU2_MPI::GetComm()); for(int i=0; i bufCoorExGlobalSearch(nDim*nGlobalSearchPoints); SU2_MPI::Allgatherv(coorExGlobalSearch.data(), nDim*nLocalSearchPoints, MPI_DOUBLE, bufCoorExGlobalSearch.data(), recvCounts.data(), displs.data(), MPI_DOUBLE, - MPI_COMM_WORLD); + SU2_MPI::GetComm()); /* Buffers to store the return information. */ vector markerIDReturn; @@ -3878,7 +3878,7 @@ void CPhysicalGeometry::DetermineDonorElementsWallFunctions(CConfig *config) { int nRankRecv; SU2_MPI::Reduce_scatter(recvCounts.data(), &nRankRecv, displs.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); /* Send the data using nonblocking sends to avoid deadlock. */ vector commReqs(3*nRankSend); @@ -3887,13 +3887,13 @@ void CPhysicalGeometry::DetermineDonorElementsWallFunctions(CConfig *config) { if( recvCounts[i] ) { const int sizeMessage = nSearchPerRank[i+1] - nSearchPerRank[i]; SU2_MPI::Isend(markerIDReturn.data() + nSearchPerRank[i], - sizeMessage, MPI_UNSIGNED_SHORT, i, i, MPI_COMM_WORLD, + sizeMessage, MPI_UNSIGNED_SHORT, i, i, SU2_MPI::GetComm(), &commReqs[nRankSend++]); SU2_MPI::Isend(boundaryElemIDReturn.data() + nSearchPerRank[i], - sizeMessage, MPI_UNSIGNED_LONG, i, i+1, MPI_COMM_WORLD, + sizeMessage, MPI_UNSIGNED_LONG, i, i+1, SU2_MPI::GetComm(), &commReqs[nRankSend++]); SU2_MPI::Isend(volElemIDDonorReturn.data() + nSearchPerRank[i], - sizeMessage, MPI_UNSIGNED_LONG, i, i+2, MPI_COMM_WORLD, + sizeMessage, MPI_UNSIGNED_LONG, i, i+2, SU2_MPI::GetComm(), &commReqs[nRankSend++]); } } @@ -3904,7 +3904,7 @@ void CPhysicalGeometry::DetermineDonorElementsWallFunctions(CConfig *config) { /* Block until a message with unsigned shorts arrives from any processor. Determine the source and the size of the message. */ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank, SU2_MPI::GetComm(), &status); int source = status.MPI_SOURCE; int sizeMess; @@ -3917,13 +3917,13 @@ void CPhysicalGeometry::DetermineDonorElementsWallFunctions(CConfig *config) { /* Receive the three messages using blocking receives. */ SU2_MPI::Recv(bufMarkerIDReturn.data(), sizeMess, MPI_UNSIGNED_SHORT, - source, rank, MPI_COMM_WORLD, &status); + source, rank, SU2_MPI::GetComm(), &status); SU2_MPI::Recv(bufBoundaryElemIDReturn.data(), sizeMess, MPI_UNSIGNED_LONG, - source, rank+1, MPI_COMM_WORLD, &status); + source, rank+1, SU2_MPI::GetComm(), &status); SU2_MPI::Recv(bufVolElemIDDonorReturn.data(), sizeMess, MPI_UNSIGNED_LONG, - source, rank+2, MPI_COMM_WORLD, &status); + source, rank+2, SU2_MPI::GetComm(), &status); /* Loop over the data just received and add it to the wall function donor information of the corresponding boundary element. */ @@ -3941,7 +3941,7 @@ void CPhysicalGeometry::DetermineDonorElementsWallFunctions(CConfig *config) { /* Wild cards have been used in the communication, so synchronize the ranks to avoid problems. 
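Without the barrier, a wild-card probe in a later communication phase could match a message that still belongs to this one.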
*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /* Loop again over the boundary elements of the marker for which a wall function treatment must be used and make remove the multiple entries @@ -4057,7 +4057,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( int nRankRecv; vector sizeSend(size, 1); SU2_MPI::Reduce_scatter(recvFromRank.data(), &nRankRecv, sizeSend.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); /* Determine the number of messages this rank will send. */ int nRankSend = 0; @@ -4075,7 +4075,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( sendBufAddExternals[i].erase(lastElem, sendBufAddExternals[i].end()); SU2_MPI::Isend(sendBufAddExternals[i].data(), sendBufAddExternals[i].size(), - MPI_UNSIGNED_LONG, i, i, MPI_COMM_WORLD, &sendReqs[nRankSend++]); + MPI_UNSIGNED_LONG, i, i, SU2_MPI::GetComm(), &sendReqs[nRankSend++]); } } @@ -4086,7 +4086,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( /* Block until a message arrives and determine the source and size of the message. Allocate the memory for a receive buffer. */ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank, SU2_MPI::GetComm(), &status); int source = status.MPI_SOURCE; int sizeMess; @@ -4094,7 +4094,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( vector recvBuf(sizeMess); SU2_MPI::Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, - source, rank, MPI_COMM_WORLD, &status); + source, rank, SU2_MPI::GetComm(), &status); /* Loop over the entries of recvBuf and add them to mapExternalElemIDToTimeLevel, if not present already. */ @@ -4109,7 +4109,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( /* Complete the non-blocking sends. Synchronize the processors afterwards, because wild cards have been used in the communication. */ SU2_MPI::Waitall(nRankSend, sendReqs.data(), MPI_STATUSES_IGNORE); - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif @@ -4185,7 +4185,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( Only needed for a parallel implementation. */ #ifdef HAVE_MPI su2double locVal = minDeltaT; - SU2_MPI::Allreduce(&locVal, &minDeltaT, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&locVal, &minDeltaT, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); #endif /* Initial estimate of the time level of the owned elements. */ @@ -4244,7 +4244,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( which I will send data. */ nRankRecv = mapRankToIndRecv.size(); SU2_MPI::Reduce_scatter(recvFromRank.data(), &nRankSend, sizeSend.data(), - MPI_INT, MPI_SUM, MPI_COMM_WORLD); + MPI_INT, MPI_SUM, SU2_MPI::GetComm()); /*--- Create the vector of vectors of the global element ID's that will be received from other ranks. 
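One inner vector is stored per rank from which data is received.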
---*/ @@ -4282,7 +4282,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( recvElem[i].erase(lastElem, recvElem[i].end()); SU2_MPI::Isend(recvElem[i].data(), recvElem[i].size(), MPI_UNSIGNED_LONG, - MRI->first, MRI->first, MPI_COMM_WORLD, &sendReqs[i]); + MRI->first, MRI->first, SU2_MPI::GetComm(), &sendReqs[i]); } /*--- Receive the messages in arbitrary sequence and store the requested @@ -4294,7 +4294,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( for(int i=0; ifirst, rank, MPI_COMM_WORLD, &status); + MRI->first, rank, SU2_MPI::GetComm(), &status); for(unsigned long j=0; jGetTimeLevel(); SU2_MPI::Isend(sendBuf[i].data(), sendElem[i].size(), MPI_UNSIGNED_SHORT, - sendRank[i], sendRank[i], MPI_COMM_WORLD, &sendReqs[i]); + sendRank[i], sendRank[i], SU2_MPI::GetComm(), &sendReqs[i]); } /*--- Receive the data for the externals. As this data is needed @@ -4568,7 +4568,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( SU2_MPI::Status status; SU2_MPI::Recv(returnBuf[i].data(), recvElem[i].size(), MPI_UNSIGNED_SHORT, - MRI->first, rank, MPI_COMM_WORLD, &status); + MRI->first, rank, SU2_MPI::GetComm(), &status); for(unsigned long j=0; jfirst, MRI->first+1, MPI_COMM_WORLD, &returnReqs[i]); + MRI->first, MRI->first+1, SU2_MPI::GetComm(), &returnReqs[i]); } /* Complete the first round of nonblocking sends, such that the @@ -4594,7 +4594,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( SU2_MPI::Status status; SU2_MPI::Recv(sendBuf[i].data(), sendElem[i].size(), MPI_UNSIGNED_SHORT, - sendRank[i], rank+1, MPI_COMM_WORLD, &status); + sendRank[i], rank+1, SU2_MPI::GetComm(), &status); for(unsigned long j=0; jSetTimeLevel(sendBuf[i][j]); @@ -4625,7 +4625,7 @@ void CPhysicalGeometry::DetermineTimeLevelElements( #ifdef HAVE_MPI SU2_MPI::Reduce(nLocalElemPerLevel.data(), nGlobalElemPerLevel.data(), nTimeLevels, MPI_UNSIGNED_LONG, MPI_SUM, - MASTER_NODE, MPI_COMM_WORLD); + MASTER_NODE, SU2_MPI::GetComm()); #endif /* Write the output. */ @@ -4655,7 +4655,7 @@ void CPhysicalGeometry::ComputeFEMGraphWeights( #ifdef HAVE_MPI unsigned short maxTimeLevelLocal = maxTimeLevel; SU2_MPI::Allreduce(&maxTimeLevelLocal, &maxTimeLevel, 1, - MPI_UNSIGNED_SHORT, MPI_MAX, MPI_COMM_WORLD); + MPI_UNSIGNED_SHORT, MPI_MAX, SU2_MPI::GetComm()); #endif /*--------------------------------------------------------------------------*/ @@ -4891,7 +4891,7 @@ void CPhysicalGeometry::ComputeFEMGraphWeights( #ifdef HAVE_MPI su2double locminvwgt = minvwgt; - SU2_MPI::Allreduce(&locminvwgt, &minvwgt, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&locminvwgt, &minvwgt, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); #endif /*--- Scale the workload of the elements, the 1st vertex weight, with the diff --git a/Common/src/geometry/CGeometry.cpp b/Common/src/geometry/CGeometry.cpp index 8cfc74f5c24c..8710b45421e6 100644 --- a/Common/src/geometry/CGeometry.cpp +++ b/Common/src/geometry/CGeometry.cpp @@ -224,7 +224,7 @@ void CGeometry::PreprocessP2PComms(CGeometry *geometry, many cells it will receive from each other processor. ---*/ SU2_MPI::Alltoall(&(nPoint_Send_All[1]), 1, MPI_INT, - &(nPoint_Recv_All[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nPoint_Recv_All[1]), 1, MPI_INT, SU2_MPI::GetComm()); /*--- Prepare to send connectivities. First check how many messages we will be sending and receiving. 
Here we also put @@ -452,11 +452,11 @@ void CGeometry::PostP2PRecvs(CGeometry *geometry, switch (commType) { case COMM_TYPE_DOUBLE: SU2_MPI::Irecv(&(bufD_P2PSend[offset]), count, MPI_DOUBLE, - source, tag, MPI_COMM_WORLD, &(req_P2PRecv[iRecv])); + source, tag, SU2_MPI::GetComm(), &(req_P2PRecv[iRecv])); break; case COMM_TYPE_UNSIGNED_SHORT: SU2_MPI::Irecv(&(bufS_P2PSend[offset]), count, MPI_UNSIGNED_SHORT, - source, tag, MPI_COMM_WORLD, &(req_P2PRecv[iRecv])); + source, tag, SU2_MPI::GetComm(), &(req_P2PRecv[iRecv])); break; default: SU2_MPI::Error("Unrecognized data type for point-to-point MPI comms.", @@ -489,11 +489,11 @@ void CGeometry::PostP2PRecvs(CGeometry *geometry, switch (commType) { case COMM_TYPE_DOUBLE: SU2_MPI::Irecv(&(bufD_P2PRecv[offset]), count, MPI_DOUBLE, - source, tag, MPI_COMM_WORLD, &(req_P2PRecv[iMessage])); + source, tag, SU2_MPI::GetComm(), &(req_P2PRecv[iMessage])); break; case COMM_TYPE_UNSIGNED_SHORT: SU2_MPI::Irecv(&(bufS_P2PRecv[offset]), count, MPI_UNSIGNED_SHORT, - source, tag, MPI_COMM_WORLD, &(req_P2PRecv[iMessage])); + source, tag, SU2_MPI::GetComm(), &(req_P2PRecv[iMessage])); break; default: SU2_MPI::Error("Unrecognized data type for point-to-point MPI comms.", @@ -551,11 +551,11 @@ void CGeometry::PostP2PSends(CGeometry *geometry, switch (commType) { case COMM_TYPE_DOUBLE: SU2_MPI::Isend(&(bufD_P2PRecv[offset]), count, MPI_DOUBLE, - dest, tag, MPI_COMM_WORLD, &(req_P2PSend[val_iSend])); + dest, tag, SU2_MPI::GetComm(), &(req_P2PSend[val_iSend])); break; case COMM_TYPE_UNSIGNED_SHORT: SU2_MPI::Isend(&(bufS_P2PRecv[offset]), count, MPI_UNSIGNED_SHORT, - dest, tag, MPI_COMM_WORLD, &(req_P2PSend[val_iSend])); + dest, tag, SU2_MPI::GetComm(), &(req_P2PSend[val_iSend])); break; default: SU2_MPI::Error("Unrecognized data type for point-to-point MPI comms.", @@ -588,11 +588,11 @@ void CGeometry::PostP2PSends(CGeometry *geometry, switch (commType) { case COMM_TYPE_DOUBLE: SU2_MPI::Isend(&(bufD_P2PSend[offset]), count, MPI_DOUBLE, - dest, tag, MPI_COMM_WORLD, &(req_P2PSend[val_iSend])); + dest, tag, SU2_MPI::GetComm(), &(req_P2PSend[val_iSend])); break; case COMM_TYPE_UNSIGNED_SHORT: SU2_MPI::Isend(&(bufS_P2PSend[offset]), count, MPI_UNSIGNED_SHORT, - dest, tag, MPI_COMM_WORLD, &(req_P2PSend[val_iSend])); + dest, tag, SU2_MPI::GetComm(), &(req_P2PSend[val_iSend])); break; default: SU2_MPI::Error("Unrecognized data type for point-to-point MPI comms.", @@ -927,7 +927,7 @@ void CGeometry::PreprocessPeriodicComms(CGeometry *geometry, many periodic points it will receive from each other processor. ---*/ SU2_MPI::Alltoall(&(nPoint_Send_All[1]), 1, MPI_INT, - &(nPoint_Recv_All[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nPoint_Recv_All[1]), 1, MPI_INT, SU2_MPI::GetComm()); /*--- Check how many messages we will be sending and receiving. Here we also put the counters into cumulative storage format to @@ -1112,7 +1112,7 @@ void CGeometry::PreprocessPeriodicComms(CGeometry *geometry, /*--- Post non-blocking recv for this proc. ---*/ SU2_MPI::Irecv(&(static_cast(idRecv)[offset]), - count, MPI_UNSIGNED_LONG, source, tag, MPI_COMM_WORLD, + count, MPI_UNSIGNED_LONG, source, tag, SU2_MPI::GetComm(), &(req_PeriodicRecv[iMessage])); /*--- Increment message counter. ---*/ @@ -1143,7 +1143,7 @@ void CGeometry::PreprocessPeriodicComms(CGeometry *geometry, /*--- Post non-blocking send for this proc. 
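Offset, count and tag mirror the bookkeeping of the matching receive posted above.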
---*/ SU2_MPI::Isend(&(static_cast(idSend)[offset]), - count, MPI_UNSIGNED_LONG, dest, tag, MPI_COMM_WORLD, + count, MPI_UNSIGNED_LONG, dest, tag, SU2_MPI::GetComm(), &(req_PeriodicSend[iMessage])); /*--- Increment message counter. ---*/ @@ -1268,12 +1268,12 @@ void CGeometry::PostPeriodicRecvs(CGeometry *geometry, switch (commType) { case COMM_TYPE_DOUBLE: SU2_MPI::Irecv(&(static_cast(bufD_PeriodicRecv)[offset]), - count, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, + count, MPI_DOUBLE, source, tag, SU2_MPI::GetComm(), &(req_PeriodicRecv[iRecv])); break; case COMM_TYPE_UNSIGNED_SHORT: SU2_MPI::Irecv(&(static_cast(bufS_PeriodicRecv)[offset]), - count, MPI_UNSIGNED_SHORT, source, tag, MPI_COMM_WORLD, + count, MPI_UNSIGNED_SHORT, source, tag, SU2_MPI::GetComm(), &(req_PeriodicRecv[iRecv])); break; default: @@ -1324,12 +1324,12 @@ void CGeometry::PostPeriodicSends(CGeometry *geometry, switch (commType) { case COMM_TYPE_DOUBLE: SU2_MPI::Isend(&(static_cast(bufD_PeriodicSend)[offset]), - count, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD, + count, MPI_DOUBLE, dest, tag, SU2_MPI::GetComm(), &(req_PeriodicSend[val_iSend])); break; case COMM_TYPE_UNSIGNED_SHORT: SU2_MPI::Isend(&(static_cast(bufS_PeriodicSend)[offset]), - count, MPI_UNSIGNED_SHORT, dest, tag, MPI_COMM_WORLD, + count, MPI_UNSIGNED_SHORT, dest, tag, SU2_MPI::GetComm(), &(req_PeriodicSend[val_iSend])); break; default: @@ -1991,8 +1991,8 @@ void CGeometry::ComputeAirfoil_Section(su2double *Plane_P0, su2double *Plane_Nor Buffer_Send_nEdge[0] = nLocalEdge; - SU2_MPI::Allreduce(&nLocalEdge, &MaxLocalEdge, 1, MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allgather(Buffer_Send_nEdge, 1, MPI_UNSIGNED_LONG, Buffer_Receive_nEdge, 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nLocalEdge, &MaxLocalEdge, 1, MPI_UNSIGNED_LONG, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allgather(Buffer_Send_nEdge, 1, MPI_UNSIGNED_LONG, Buffer_Receive_nEdge, 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); Buffer_Send_Coord = new su2double [MaxLocalEdge*6]; Buffer_Receive_Coord = new su2double [nProcessor*MaxLocalEdge*6]; @@ -2024,9 +2024,9 @@ void CGeometry::ComputeAirfoil_Section(su2double *Plane_P0, su2double *Plane_Nor Buffer_Send_GlobalID[iEdge*4 + 3] = JGlobalID_Index1[iEdge]; } - SU2_MPI::Allgather(Buffer_Send_Coord, nBuffer_Coord, MPI_DOUBLE, Buffer_Receive_Coord, nBuffer_Coord, MPI_DOUBLE, MPI_COMM_WORLD); - SU2_MPI::Allgather(Buffer_Send_Variable, nBuffer_Variable, MPI_DOUBLE, Buffer_Receive_Variable, nBuffer_Variable, MPI_DOUBLE, MPI_COMM_WORLD); - SU2_MPI::Allgather(Buffer_Send_GlobalID, nBuffer_GlobalID, MPI_UNSIGNED_LONG, Buffer_Receive_GlobalID, nBuffer_GlobalID, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); + SU2_MPI::Allgather(Buffer_Send_Coord, nBuffer_Coord, MPI_DOUBLE, Buffer_Receive_Coord, nBuffer_Coord, MPI_DOUBLE, SU2_MPI::GetComm()); + SU2_MPI::Allgather(Buffer_Send_Variable, nBuffer_Variable, MPI_DOUBLE, Buffer_Receive_Variable, nBuffer_Variable, MPI_DOUBLE, SU2_MPI::GetComm()); + SU2_MPI::Allgather(Buffer_Send_GlobalID, nBuffer_GlobalID, MPI_UNSIGNED_LONG, Buffer_Receive_GlobalID, nBuffer_GlobalID, MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); /*--- Clean the vectors before adding the new vertices only to the master node ---*/ @@ -2728,7 +2728,7 @@ void CGeometry::ComputeSurf_Straightness(CConfig *config, /*--- Product of type (bool) is equivalnt to a 'logical and' ---*/ SU2_MPI::Allreduce(Buff_Send_isStraight.data(), Buff_Recv_isStraight.data(), - nMarker_Global, MPI_INT, MPI_PROD, MPI_COMM_WORLD); + nMarker_Global, MPI_INT, MPI_PROD, 
SU2_MPI::GetComm()); /*--- Print results on screen. ---*/ if(rank == MASTER_NODE) { @@ -2996,9 +2996,9 @@ void CGeometry::ComputeSurf_Curvature(CConfig *config) { su2double MyMeanK = MeanK; MeanK = 0.0; su2double MyMaxK = MaxK; MaxK = 0.0; unsigned long MynPointDomain = TotalnPointDomain; TotalnPointDomain = 0; - SU2_MPI::Allreduce(&MyMeanK, &MeanK, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyMaxK, &MaxK, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MynPointDomain, &TotalnPointDomain, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyMeanK, &MeanK, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyMaxK, &MaxK, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MynPointDomain, &TotalnPointDomain, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); /*--- Compute the mean ---*/ MeanK /= su2double(TotalnPointDomain); @@ -3017,7 +3017,7 @@ void CGeometry::ComputeSurf_Curvature(CConfig *config) { } su2double MySigmaK = SigmaK; SigmaK = 0.0; - SU2_MPI::Allreduce(&MySigmaK, &SigmaK, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MySigmaK, &SigmaK, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); SigmaK = sqrt(SigmaK/su2double(TotalnPointDomain)); @@ -3052,8 +3052,8 @@ void CGeometry::ComputeSurf_Curvature(CConfig *config) { /*--- Communicate to all processors the total number of critical edge nodes. ---*/ MaxLocalVertex = 0; - SU2_MPI::Allreduce(&nLocalVertex, &MaxLocalVertex, 1, MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allgather(Buffer_Send_nVertex, 1, MPI_UNSIGNED_LONG, Buffer_Receive_nVertex, 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nLocalVertex, &MaxLocalVertex, 1, MPI_UNSIGNED_LONG, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allgather(Buffer_Send_nVertex, 1, MPI_UNSIGNED_LONG, Buffer_Receive_nVertex, 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); /*--- Create and initialize to zero some buffers to hold the coordinates of the boundary nodes that are communicated from each partition (all-to-all). ---*/ @@ -3071,7 +3071,7 @@ void CGeometry::ComputeSurf_Curvature(CConfig *config) { Buffer_Send_Coord[iVertex*nDim+iDim] = nodes->GetCoord(iPoint, iDim); } - SU2_MPI::Allgather(Buffer_Send_Coord, nBuffer, MPI_DOUBLE, Buffer_Receive_Coord, nBuffer, MPI_DOUBLE, MPI_COMM_WORLD); + SU2_MPI::Allgather(Buffer_Send_Coord, nBuffer, MPI_DOUBLE, Buffer_Receive_Coord, nBuffer, MPI_DOUBLE, SU2_MPI::GetComm()); /*--- Loop over all interior mesh nodes on the local partition and compute the distances to each of the no-slip boundary nodes in the entire mesh. 
@@ -3184,15 +3184,15 @@ void CGeometry::FilterValuesAtElementCG(const vector &filter_radius, SU2_OMP_MASTER { su2double* dbl_buffer = new su2double [Global_nElemDomain*nDim]; - SU2_MPI::Allreduce(cg_elem,dbl_buffer,Global_nElemDomain*nDim,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + SU2_MPI::Allreduce(cg_elem,dbl_buffer,Global_nElemDomain*nDim,MPI_DOUBLE,MPI_SUM,SU2_MPI::GetComm()); swap(dbl_buffer, cg_elem); delete [] dbl_buffer; dbl_buffer = new su2double [Global_nElemDomain]; - SU2_MPI::Allreduce(vol_elem,dbl_buffer,Global_nElemDomain,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + SU2_MPI::Allreduce(vol_elem,dbl_buffer,Global_nElemDomain,MPI_DOUBLE,MPI_SUM,SU2_MPI::GetComm()); swap(dbl_buffer, vol_elem); delete [] dbl_buffer; vector char_buffer(Global_nElemDomain); - MPI_Allreduce(halo_detect.data(),char_buffer.data(),Global_nElemDomain,MPI_CHAR,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(halo_detect.data(),char_buffer.data(),Global_nElemDomain,MPI_CHAR,MPI_SUM,SU2_MPI::GetComm()); halo_detect.swap(char_buffer); } SU2_OMP_BARRIER @@ -3234,7 +3234,7 @@ void CGeometry::FilterValuesAtElementCG(const vector &filter_radius, SU2_OMP_MASTER { su2double *buffer = new su2double [Global_nElemDomain]; - SU2_MPI::Allreduce(work_values,buffer,Global_nElemDomain,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + SU2_MPI::Allreduce(work_values,buffer,Global_nElemDomain,MPI_DOUBLE,MPI_SUM,SU2_MPI::GetComm()); swap(buffer, work_values); delete [] buffer; } SU2_OMP_BARRIER @@ -3315,7 +3315,7 @@ void CGeometry::FilterValuesAtElementCG(const vector &filter_radius, limited_searches /= kernels.size(); unsigned long tmp = limited_searches; - SU2_MPI::Reduce(&tmp,&limited_searches,1,MPI_UNSIGNED_LONG,MPI_SUM,MASTER_NODE,MPI_COMM_WORLD); + SU2_MPI::Reduce(&tmp,&limited_searches,1,MPI_UNSIGNED_LONG,MPI_SUM,MASTER_NODE,SU2_MPI::GetComm()); if (rank==MASTER_NODE && limited_searches>0) cout << "Warning: The filter radius was limited for " << limited_searches @@ -3353,7 +3353,7 @@ void CGeometry::GetGlobalElementAdjacencyMatrix(vector &neighbour /*--- Share with all processors ---*/ { unsigned short *buffer = new unsigned short [Global_nElemDomain]; - MPI_Allreduce(nFaces_elem,buffer,Global_nElemDomain,MPI_UNSIGNED_SHORT,MPI_MAX,MPI_COMM_WORLD); + MPI_Allreduce(nFaces_elem,buffer,Global_nElemDomain,MPI_UNSIGNED_SHORT,MPI_MAX,SU2_MPI::GetComm()); /*--- swap pointers and delete old data to keep the same variable name after reduction ---*/ swap(buffer, nFaces_elem); delete [] buffer; } @@ -3400,7 +3400,7 @@ void CGeometry::GetGlobalElementAdjacencyMatrix(vector &neighbour /*--- Share with all processors ---*/ { long *buffer = new long [matrix_size]; - MPI_Allreduce(neighbour_idx,buffer,matrix_size,MPI_LONG,MPI_MAX,MPI_COMM_WORLD); + MPI_Allreduce(neighbour_idx,buffer,matrix_size,MPI_LONG,MPI_MAX,SU2_MPI::GetComm()); swap(buffer, neighbour_idx); delete [] buffer; } #endif diff --git a/Common/src/geometry/CMultiGridGeometry.cpp b/Common/src/geometry/CMultiGridGeometry.cpp index 914b21a7d202..6e09d81b9cf7 100644 --- a/Common/src/geometry/CMultiGridGeometry.cpp +++ b/Common/src/geometry/CMultiGridGeometry.cpp @@ -513,9 +513,9 @@ CMultiGridGeometry::CMultiGridGeometry(CGeometry **geometry, CConfig *config_con #ifdef HAVE_MPI /*--- Send/Receive information using Sendrecv ---*/ SU2_MPI::Sendrecv(Buffer_Send_Children, nBufferS_Vector, MPI_UNSIGNED_LONG, send_to,0, - Buffer_Receive_Children, nBufferR_Vector, MPI_UNSIGNED_LONG, receive_from,0, MPI_COMM_WORLD, &status); + Buffer_Receive_Children, nBufferR_Vector, MPI_UNSIGNED_LONG, receive_from,0, 
SU2_MPI::GetComm(), &status); SU2_MPI::Sendrecv(Buffer_Send_Parent, nBufferS_Vector, MPI_UNSIGNED_LONG, send_to,1, - Buffer_Receive_Parent, nBufferR_Vector, MPI_UNSIGNED_LONG, receive_from,1, MPI_COMM_WORLD, &status); + Buffer_Receive_Parent, nBufferR_Vector, MPI_UNSIGNED_LONG, receive_from,1, SU2_MPI::GetComm(), &status); #else /*--- Receive information without MPI ---*/ for (iVertex = 0; iVertex < nVertexR; iVertex++) { @@ -612,8 +612,8 @@ CMultiGridGeometry::CMultiGridGeometry(CGeometry **geometry, CConfig *config_con Local_nPointCoarse = nPoint; Local_nPointFine = fine_grid->GetnPoint(); - SU2_MPI::Allreduce(&Local_nPointCoarse, &Global_nPointCoarse, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_nPointFine, &Global_nPointFine, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Local_nPointCoarse, &Global_nPointCoarse, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_nPointFine, &Global_nPointFine, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); su2double Coeff = 1.0, CFL = 0.0, factor = 1.5; diff --git a/Common/src/geometry/CPhysicalGeometry.cpp b/Common/src/geometry/CPhysicalGeometry.cpp index dc8ae548d27b..33a75ef557f3 100644 --- a/Common/src/geometry/CPhysicalGeometry.cpp +++ b/Common/src/geometry/CPhysicalGeometry.cpp @@ -256,9 +256,9 @@ CPhysicalGeometry::CPhysicalGeometry(CGeometry *geometry, nLocal_Bound_Elem = nLocal_Line + nLocal_BoundTria + nLocal_BoundQuad; SU2_MPI::Allreduce(&nLocal_Elem, &nGlobal_Elem, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); SU2_MPI::Allreduce(&nLocal_Bound_Elem, &nGlobal_Bound_Elem, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); /*--- With the distribution of all points, elements, and markers based on the ParMETIS coloring complete, as a final step, load this data into @@ -597,7 +597,7 @@ void CPhysicalGeometry::DistributeColoring(const CConfig *config, many points it will receive from each other processor. ---*/ SU2_MPI::Alltoall(&(nPoint_Send[1]), 1, MPI_INT, - &(nPoint_Recv[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nPoint_Recv[1]), 1, MPI_INT, SU2_MPI::GetComm()); /*--- Prepare to send colors. First check how many messages we will be sending and receiving. Here we also put @@ -840,7 +840,7 @@ void CPhysicalGeometry::DistributeVolumeConnectivity(const CConfig *config, many cells it will receive from each other processor. ---*/ SU2_MPI::Alltoall(&(nElem_Send[1]), 1, MPI_INT, - &(nElem_Recv[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nElem_Recv[1]), 1, MPI_INT, SU2_MPI::GetComm()); /*--- Prepare to send connectivities. First check how many messages we will be sending and receiving. Here we also put @@ -1137,7 +1137,7 @@ void CPhysicalGeometry::DistributePoints(const CConfig *config, CGeometry *geome many points it will receive from each other processor. ---*/ SU2_MPI::Alltoall(&(nPoint_Send[1]), 1, MPI_INT, - &(nPoint_Recv[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nPoint_Recv[1]), 1, MPI_INT, SU2_MPI::GetComm()); /*--- Prepare to send colors, ids, and coords. First check how many messages we will be sending and receiving. Here we also put @@ -1444,7 +1444,7 @@ void CPhysicalGeometry::PartitionSurfaceConnectivity(CConfig *config, many cells it will receive from each other processor. 
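Only the master rank holds these counts at this point, hence a scatter from MASTER_NODE rather than an all-to-all.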
---*/ SU2_MPI::Scatter(&(nElem_Send[1]), 1, MPI_INT, - &(nElem_Recv[1]), 1, MPI_INT, MASTER_NODE, MPI_COMM_WORLD); + &(nElem_Recv[1]), 1, MPI_INT, MASTER_NODE, SU2_MPI::GetComm()); /*--- Prepare to send connectivities. First check how many messages we will be sending and receiving. Here we also put @@ -1816,7 +1816,7 @@ void CPhysicalGeometry::DistributeSurfaceConnectivity(CConfig *config, many cells it will receive from each other processor. ---*/ SU2_MPI::Alltoall(&(nElem_Send[1]), 1, MPI_INT, - &(nElem_Recv[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nElem_Recv[1]), 1, MPI_INT, SU2_MPI::GetComm()); /*--- Prepare to send connectivities. First check how many messages we will be sending and receiving. Here we also put @@ -2086,7 +2086,7 @@ void CPhysicalGeometry::DistributeMarkerTags(CConfig *config, CGeometry *geometr /*--- Broadcast the global number of markers in the mesh. ---*/ SU2_MPI::Bcast(&nMarker_Global, 1, MPI_UNSIGNED_LONG, - MASTER_NODE, MPI_COMM_WORLD); + MASTER_NODE, SU2_MPI::GetComm()); char *mpi_str_buf = new char[nMarker_Global*MAX_STRING_SIZE](); if (rank == MASTER_NODE) { @@ -2099,7 +2099,7 @@ void CPhysicalGeometry::DistributeMarkerTags(CConfig *config, CGeometry *geometr /*--- Broadcast the string names of the variables. ---*/ SU2_MPI::Bcast(mpi_str_buf, (int)nMarker_Global*MAX_STRING_SIZE, MPI_CHAR, - MASTER_NODE, MPI_COMM_WORLD); + MASTER_NODE, SU2_MPI::GetComm()); /*--- Now parse the string names and load into our marker tag vector. We also need to set the values of all markers into the config. ---*/ @@ -2200,9 +2200,9 @@ void CPhysicalGeometry::LoadPoints(CConfig *config, CGeometry *geometry) { #ifdef HAVE_MPI SU2_MPI::Allreduce(&Local_nPoint, &Global_nPoint, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); SU2_MPI::Allreduce(&Local_nPointDomain, &Global_nPointDomain, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #else Global_nPoint = Local_nPoint; Global_nPointDomain = Local_nPointDomain; @@ -2498,7 +2498,7 @@ void CPhysicalGeometry::LoadVolumeElements(CConfig *config, CGeometry *geometry) values are important for merging and writing output later. ---*/ SU2_MPI::Allreduce(&Local_Elem, &Global_nElem, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); if ((rank == MASTER_NODE) && (size > SINGLE_NODE)) cout << Global_nElem << " interior elements including halo cells. 
" << endl; @@ -2527,17 +2527,17 @@ void CPhysicalGeometry::LoadVolumeElements(CConfig *config, CGeometry *geometry) unsigned long Local_nElemPyramid = nelem_pyramid; SU2_MPI::Allreduce(&Local_nElemTri, &Global_nelem_triangle, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); SU2_MPI::Allreduce(&Local_nElemQuad, &Global_nelem_quad, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); SU2_MPI::Allreduce(&Local_nElemTet, &Global_nelem_tetra, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); SU2_MPI::Allreduce(&Local_nElemHex, &Global_nelem_hexa, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); SU2_MPI::Allreduce(&Local_nElemPrism, &Global_nelem_prism, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); SU2_MPI::Allreduce(&Local_nElemPyramid, &Global_nelem_pyramid, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #else Global_nelem_triangle = nelem_triangle; Global_nelem_quad = nelem_quad; @@ -2903,37 +2903,37 @@ void CPhysicalGeometry::InitiateCommsAll(void *bufSend, switch (commType) { case COMM_TYPE_DOUBLE: SU2_MPI::Irecv(&(static_cast(bufRecv)[offset]), - count, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, + count, MPI_DOUBLE, source, tag, SU2_MPI::GetComm(), &(recvReq[iMessage])); break; case COMM_TYPE_UNSIGNED_LONG: SU2_MPI::Irecv(&(static_cast(bufRecv)[offset]), - count, MPI_UNSIGNED_LONG, source, tag, MPI_COMM_WORLD, + count, MPI_UNSIGNED_LONG, source, tag, SU2_MPI::GetComm(), &(recvReq[iMessage])); break; case COMM_TYPE_LONG: SU2_MPI::Irecv(&(static_cast(bufRecv)[offset]), - count, MPI_LONG, source, tag, MPI_COMM_WORLD, + count, MPI_LONG, source, tag, SU2_MPI::GetComm(), &(recvReq[iMessage])); break; case COMM_TYPE_UNSIGNED_SHORT: SU2_MPI::Irecv(&(static_cast(bufRecv)[offset]), - count, MPI_UNSIGNED_SHORT, source, tag, MPI_COMM_WORLD, + count, MPI_UNSIGNED_SHORT, source, tag, SU2_MPI::GetComm(), &(recvReq[iMessage])); break; case COMM_TYPE_CHAR: SU2_MPI::Irecv(&(static_cast(bufRecv)[offset]), - count, MPI_CHAR, source, tag, MPI_COMM_WORLD, + count, MPI_CHAR, source, tag, SU2_MPI::GetComm(), &(recvReq[iMessage])); break; case COMM_TYPE_SHORT: SU2_MPI::Irecv(&(static_cast(bufRecv)[offset]), - count, MPI_SHORT, source, tag, MPI_COMM_WORLD, + count, MPI_SHORT, source, tag, SU2_MPI::GetComm(), &(recvReq[iMessage])); break; case COMM_TYPE_INT: SU2_MPI::Irecv(&(static_cast(bufRecv)[offset]), - count, MPI_INT, source, tag, MPI_COMM_WORLD, + count, MPI_INT, source, tag, SU2_MPI::GetComm(), &(recvReq[iMessage])); break; default: @@ -2978,37 +2978,37 @@ void CPhysicalGeometry::InitiateCommsAll(void *bufSend, switch (commType) { case COMM_TYPE_DOUBLE: SU2_MPI::Isend(&(static_cast(bufSend)[offset]), - count, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD, + count, MPI_DOUBLE, dest, tag, SU2_MPI::GetComm(), &(sendReq[iMessage])); break; case COMM_TYPE_UNSIGNED_LONG: SU2_MPI::Isend(&(static_cast(bufSend)[offset]), - count, MPI_UNSIGNED_LONG, dest, tag, MPI_COMM_WORLD, + count, MPI_UNSIGNED_LONG, dest, tag, SU2_MPI::GetComm(), &(sendReq[iMessage])); break; case COMM_TYPE_LONG: SU2_MPI::Isend(&(static_cast(bufSend)[offset]), - count, MPI_LONG, dest, tag, MPI_COMM_WORLD, + count, MPI_LONG, dest, tag, SU2_MPI::GetComm(), &(sendReq[iMessage])); break; case COMM_TYPE_UNSIGNED_SHORT: 
SU2_MPI::Isend(&(static_cast(bufSend)[offset]), - count, MPI_UNSIGNED_SHORT, dest, tag, MPI_COMM_WORLD, + count, MPI_UNSIGNED_SHORT, dest, tag, SU2_MPI::GetComm(), &(sendReq[iMessage])); break; case COMM_TYPE_CHAR: SU2_MPI::Isend(&(static_cast(bufSend)[offset]), - count, MPI_CHAR, dest, tag, MPI_COMM_WORLD, + count, MPI_CHAR, dest, tag, SU2_MPI::GetComm(), &(sendReq[iMessage])); break; case COMM_TYPE_SHORT: SU2_MPI::Isend(&(static_cast(bufSend)[offset]), - count, MPI_SHORT, dest, tag, MPI_COMM_WORLD, + count, MPI_SHORT, dest, tag, SU2_MPI::GetComm(), &(sendReq[iMessage])); break; case COMM_TYPE_INT: SU2_MPI::Isend(&(static_cast(bufSend)[offset]), - count, MPI_INT, dest, tag, MPI_COMM_WORLD, + count, MPI_INT, dest, tag, SU2_MPI::GetComm(), &(sendReq[iMessage])); break; default: @@ -3884,7 +3884,7 @@ void CPhysicalGeometry::LoadLinearlyPartitionedVolumeElements(CConfig *co the CGNS grid with all ranks. ---*/ auto reduce = [](unsigned long p, unsigned long& t) { - SU2_MPI::Allreduce(&p, &t, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&p, &t, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); }; reduce(nelem_triangle, Global_nelem_triangle); reduce(nelem_quad, Global_nelem_quad); @@ -4379,7 +4379,7 @@ void CPhysicalGeometry::Check_IntElem_Orientation(const CConfig *config) { auto reduce = [](unsigned long& val) { unsigned long tmp = val; - SU2_MPI::Allreduce(&tmp, &val, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&tmp, &val, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); }; reduce(tria_flip); reduce(quad_flip); reduce(tet_flip); reduce(pyram_flip); @@ -4526,7 +4526,7 @@ void CPhysicalGeometry::Check_BoundElem_Orientation(const CConfig *config) { auto reduce = [](unsigned long& val) { unsigned long tmp = val; - SU2_MPI::Allreduce(&tmp, &val, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&tmp, &val, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); }; reduce(line_flip); reduce(tria_flip); reduce(quad_flip); reduce(quad_error); @@ -4611,19 +4611,19 @@ void CPhysicalGeometry::SetPositive_ZArea(CConfig *config) { } - SU2_MPI::Allreduce(&PositiveXArea, &TotalPositiveXArea, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&PositiveYArea, &TotalPositiveYArea, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&PositiveZArea, &TotalPositiveZArea, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&PositiveXArea, &TotalPositiveXArea, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&PositiveYArea, &TotalPositiveYArea, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&PositiveZArea, &TotalPositiveZArea, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); - SU2_MPI::Allreduce(&MinCoordX, &TotalMinCoordX, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MinCoordY, &TotalMinCoordY, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MinCoordZ, &TotalMinCoordZ, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MinCoordX, &TotalMinCoordX, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MinCoordY, &TotalMinCoordY, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MinCoordZ, &TotalMinCoordZ, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); - SU2_MPI::Allreduce(&MaxCoordX, &TotalMaxCoordX, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MaxCoordY, &TotalMaxCoordY, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MaxCoordZ, &TotalMaxCoordZ, 1, MPI_DOUBLE, MPI_MAX, 
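The reduce lambdas above copy the local value into a temporary before calling Allreduce because MPI forbids aliasing the send and receive buffers. A self-contained sketch of that pattern next to the MPI_IN_PLACE variant that avoids the copy (plain MPI calls; after this patch both would be invoked with the wrapper communicator):

#include <mpi.h>

/*--- The pattern used above: reduce through a temporary, since the
 send buffer must not alias the receive buffer. ---*/
void ReduceCounter(unsigned long& val, MPI_Comm comm) {
  unsigned long tmp = val;
  MPI_Allreduce(&tmp, &val, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
}

/*--- Equivalent without the temporary. ---*/
void ReduceCounterInPlace(unsigned long& val, MPI_Comm comm) {
  MPI_Allreduce(MPI_IN_PLACE, &val, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
}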
@@ -4611,19 +4611,19 @@ void CPhysicalGeometry::SetPositive_ZArea(CConfig *config) {
   }
 
-  SU2_MPI::Allreduce(&PositiveXArea, &TotalPositiveXArea, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&PositiveYArea, &TotalPositiveYArea, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&PositiveZArea, &TotalPositiveZArea, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&PositiveXArea, &TotalPositiveXArea, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&PositiveYArea, &TotalPositiveYArea, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&PositiveZArea, &TotalPositiveZArea, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
 
-  SU2_MPI::Allreduce(&MinCoordX, &TotalMinCoordX, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&MinCoordY, &TotalMinCoordY, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&MinCoordZ, &TotalMinCoordZ, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&MinCoordX, &TotalMinCoordX, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&MinCoordY, &TotalMinCoordY, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&MinCoordZ, &TotalMinCoordZ, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
 
-  SU2_MPI::Allreduce(&MaxCoordX, &TotalMaxCoordX, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&MaxCoordY, &TotalMaxCoordY, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&MaxCoordZ, &TotalMaxCoordZ, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&MaxCoordX, &TotalMaxCoordX, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&MaxCoordY, &TotalMaxCoordY, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&MaxCoordZ, &TotalMaxCoordZ, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
 
-  SU2_MPI::Allreduce(&WettedArea, &TotalWettedArea, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&WettedArea, &TotalWettedArea, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
 
   /*--- Set a reference area if no value is provided ---*/
@@ -5128,8 +5128,8 @@ unsigned short iMarker, jMarker, iMarkerTP, iSpan, jSpan, kSpan = 0;
   nSpan_max = nSpan;
   My_nSpan = nSpan; nSpan = 0;
   My_MaxnSpan = nSpan_max; nSpan_max = 0;
 
-  SU2_MPI::Allreduce(&My_nSpan, &nSpan, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&My_MaxnSpan, &nSpan_max, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&My_nSpan, &nSpan, 1, MPI_INT, MPI_SUM, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&My_MaxnSpan, &nSpan_max, 1, MPI_INT, MPI_MAX, SU2_MPI::GetComm());
 #endif
 
   /*--- initialize the vector that will contain the disordered values span-wise ---*/
@@ -5214,8 +5214,8 @@ unsigned short iMarker, jMarker, iMarkerTP, iSpan, jSpan, kSpan = 0;
       valueSpan[iSpan] = -1001.0;
     }
 
-  SU2_MPI::Allgather(MyValueSpan, nSpan_max , MPI_DOUBLE, MyTotValueSpan, nSpan_max, MPI_DOUBLE, MPI_COMM_WORLD);
-  SU2_MPI::Allgather(&nSpan_loc, 1 , MPI_INT, My_nSpan_loc, 1, MPI_INT, MPI_COMM_WORLD);
+  SU2_MPI::Allgather(MyValueSpan, nSpan_max , MPI_DOUBLE, MyTotValueSpan, nSpan_max, MPI_DOUBLE, SU2_MPI::GetComm());
+  SU2_MPI::Allgather(&nSpan_loc, 1 , MPI_INT, My_nSpan_loc, 1, MPI_INT, SU2_MPI::GetComm());
 
   jSpan = 0;
   for (iSize = 0; iSize< size; iSize++){
@@ -5334,8 +5334,8 @@ unsigned short iMarker, jMarker, iMarkerTP, iSpan, jSpan, kSpan = 0;
 #ifdef HAVE_MPI
   MyMin= min; min = 0;
   MyMax= max; max = 0;
 
-  SU2_MPI::Allreduce(&MyMin, &min, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&MyMax, &max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&MyMin, &min, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&MyMax, &max, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
 #endif
 
   // cout <<"min " << min << endl;
@@ -5858,9 +5858,9 @@ void CPhysicalGeometry::SetTurboVertex(CConfig *config, unsigned short val_iZone
       MyIntMin = minIntAngPitch[iSpan]; minIntAngPitch[iSpan] = 10.0E+6;
       MyMax = maxAngPitch[iSpan]; maxAngPitch[iSpan] = -10.0E+6;
 
-      SU2_MPI::Allreduce(&MyMin, &minAngPitch[iSpan], 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
-      SU2_MPI::Allreduce(&MyIntMin, &minIntAngPitch[iSpan], 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
-      SU2_MPI::Allreduce(&MyMax, &maxAngPitch[iSpan], 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+      SU2_MPI::Allreduce(&MyMin, &minAngPitch[iSpan], 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
+      SU2_MPI::Allreduce(&MyIntMin, &minIntAngPitch[iSpan], 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
+      SU2_MPI::Allreduce(&MyMax, &maxAngPitch[iSpan], 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
 #endif
@@ -5885,7 +5885,7 @@ void CPhysicalGeometry::SetTurboVertex(CConfig *config, unsigned short val_iZone
 #ifdef HAVE_MPI
       My_nVert = nVert;nVert = 0;
-      SU2_MPI::Allreduce(&My_nVert, &nVert, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+      SU2_MPI::Allreduce(&My_nVert, &nVert, 1, MPI_INT, MPI_SUM, SU2_MPI::GetComm());
 #endif
 
       /*--- to be set for all the processor to initialize an appropriate number of frequency for the NR BC ---*/
@@ -5970,11 +5970,11 @@ void CPhysicalGeometry::SetTurboVertex(CConfig *config, unsigned short val_iZone
         }
       }
     }
-    SU2_MPI::Gather(y_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, y_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
-    SU2_MPI::Gather(x_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, x_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
-    SU2_MPI::Gather(z_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, z_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
-    SU2_MPI::Gather(angCoord_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, angCoord_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
-    SU2_MPI::Gather(deltaAngCoord_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, deltaAngCoord_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
+    SU2_MPI::Gather(y_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, y_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
+    SU2_MPI::Gather(x_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, x_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
+    SU2_MPI::Gather(z_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, z_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
+    SU2_MPI::Gather(angCoord_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, angCoord_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
+    SU2_MPI::Gather(deltaAngCoord_loc[iSpan], nTotVertex_gb[iSpan] , MPI_DOUBLE, deltaAngCoord_gb, nTotVertex_gb[iSpan], MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
     if (rank == MASTER_NODE){
       for(iSpanVertex = 0; iSpanVertexSetDomainVolume(DomainVolume);
   if ((rank == MASTER_NODE) && (action == ALLOCATE)) {
@@ -8199,7 +8199,7 @@ void CPhysicalGeometry::SetColorGrid_Parallel(const CConfig *config) {
   if (size == SINGLE_NODE) return;
 
-  MPI_Comm comm = MPI_COMM_WORLD;
+  MPI_Comm comm = SU2_MPI::GetComm();
 
   /*--- Linear partitioner object to help prepare parmetis data. ---*/
@@ -8532,21 +8532,21 @@ void CPhysicalGeometry::ComputeMeshQualityStatistics(const CConfig *config) {
   su2double Global_Ortho_Min, Global_Ortho_Max;
   SU2_MPI::Allreduce(&orthoMin, &Global_Ortho_Min, 1,
-                     MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
+                     MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
   SU2_MPI::Allreduce(&orthoMax, &Global_Ortho_Max, 1,
-                     MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+                     MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
 
   su2double Global_AR_Min, Global_AR_Max;
   SU2_MPI::Allreduce(&arMin, &Global_AR_Min, 1,
-                     MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
+                     MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
   SU2_MPI::Allreduce(&arMax, &Global_AR_Max, 1,
-                     MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+                     MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
 
   su2double Global_VR_Min, Global_VR_Max;
   SU2_MPI::Allreduce(&vrMin, &Global_VR_Min, 1,
-                     MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
+                     MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
   SU2_MPI::Allreduce(&vrMax, &Global_VR_Max, 1,
-                     MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+                     MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
 
   /*--- Print the summary to the console for the user. ---*/
@@ -8624,7 +8624,7 @@ void CPhysicalGeometry::SetBoundSensitivity(CConfig *config) {
   bool *PointInDomain;
 
   nPointLocal = nPoint;
-  SU2_MPI::Allreduce(&nPointLocal, &nPointGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&nPointLocal, &nPointGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm());
 
   Point2Vertex = new unsigned long[nPointGlobal][2];
   PointInDomain = new bool[nPointGlobal];
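The span-wise code above pads every rank's block to nSpan_max entries and marks unused slots with the sentinel -1001.0, so a fixed-stride Allgather suffices instead of Allgatherv. A reduced sketch of that idea (hypothetical helper, not SU2 code):

#include <mpi.h>
#include <algorithm>
#include <vector>

std::vector<double> GatherPadded(const std::vector<double>& local,
                                 int maxCount, MPI_Comm comm) {
  int nRanks = 0;
  MPI_Comm_size(comm, &nRanks);

  /*--- Pad to the global maximum; unused slots carry a sentinel the
   receivers recognize and skip (assumes local.size() <= maxCount). ---*/
  std::vector<double> sendBuf(maxCount, -1001.0);
  std::copy(local.begin(), local.end(), sendBuf.begin());

  std::vector<double> recvBuf(static_cast<size_t>(maxCount) * nRanks);
  MPI_Allgather(sendBuf.data(), maxCount, MPI_DOUBLE,
                recvBuf.data(), maxCount, MPI_DOUBLE, comm);
  return recvBuf;
}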
@@ -8903,7 +8903,7 @@ void CPhysicalGeometry::SetSensitivity(CConfig *config) {
   /*--- All ranks open the file using MPI. ---*/
 
-  ierr = MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw);
+  ierr = MPI_File_open(SU2_MPI::GetComm(), fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw);
 
   /*--- Error check opening the file. ---*/
@@ -8920,7 +8920,7 @@ void CPhysicalGeometry::SetSensitivity(CConfig *config) {
   /*--- Broadcast the number of variables to all procs and store clearly. ---*/
 
-  SU2_MPI::Bcast(Restart_Vars, nRestart_Vars, MPI_INT, MASTER_NODE, MPI_COMM_WORLD);
+  SU2_MPI::Bcast(Restart_Vars, nRestart_Vars, MPI_INT, MASTER_NODE, SU2_MPI::GetComm());
 
   /*--- Check that this is an SU2 binary file. SU2 binary files have the hex representation of "SU2" as the first int in the file. ---*/
@@ -8951,7 +8951,7 @@ void CPhysicalGeometry::SetSensitivity(CConfig *config) {
   /*--- Broadcast the string names of the variables. ---*/
 
   SU2_MPI::Bcast(mpi_str_buf, nFields*CGNS_STRING_SIZE, MPI_CHAR,
-                 MASTER_NODE, MPI_COMM_WORLD);
+                 MASTER_NODE, SU2_MPI::GetComm());
 
   /*--- Now parse the string names and load into the config class in case we need them for writing visualization files (SU2_SOL). ---*/
@@ -9038,7 +9038,7 @@ void CPhysicalGeometry::SetSensitivity(CConfig *config) {
   /*--- Communicate metadata. ---*/
 
-  SU2_MPI::Bcast(&Restart_Iter, 1, MPI_INT, MASTER_NODE, MPI_COMM_WORLD);
+  SU2_MPI::Bcast(&Restart_Iter, 1, MPI_INT, MASTER_NODE, SU2_MPI::GetComm());
 
   /*--- Copy to a su2double structure (because of the SU2_MPI::Bcast doesn't work with passive data)---*/
@@ -9046,7 +9046,7 @@ void CPhysicalGeometry::SetSensitivity(CConfig *config) {
   for (unsigned short iVar = 0; iVar < 8; iVar++)
     Restart_Meta[iVar] = Restart_Meta_Passive[iVar];
 
-  SU2_MPI::Bcast(Restart_Meta, 8, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
+  SU2_MPI::Bcast(Restart_Meta, 8, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
 
   /*--- All ranks close the file after writing. ---*/
@@ -9164,7 +9164,7 @@ void CPhysicalGeometry::SetSensitivity(CConfig *config) {
   /*--- All ranks open the file using MPI. ---*/
 
-  ierr = MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw);
+  ierr = MPI_File_open(SU2_MPI::GetComm(), fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw);
 
   /*--- Error check opening the file. ---*/
@@ -9179,7 +9179,7 @@ void CPhysicalGeometry::SetSensitivity(CConfig *config) {
   /*--- Broadcast the number of variables to all procs and store clearly. ---*/
 
-  SU2_MPI::Bcast(&magic_number, 1, MPI_INT, MASTER_NODE, MPI_COMM_WORLD);
+  SU2_MPI::Bcast(&magic_number, 1, MPI_INT, MASTER_NODE, SU2_MPI::GetComm());
 
   /*--- Check that this is an SU2 binary file. SU2 binary files have the hex representation of "SU2" as the first int in the file. ---*/
@@ -9413,7 +9413,7 @@ void CPhysicalGeometry::ReadUnorderedSensitivity(CConfig *config) {
   unsigned long myUnmatched = unmatched; unmatched = 0;
   SU2_MPI::Allreduce(&myUnmatched, &unmatched, 1,
-                     MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
+                     MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm());
   if ((unmatched > 0) && (rank == MASTER_NODE)) {
     cout << " Warning: there are " << unmatched;
     cout << " points with a match distance > 1e-10." << endl;
@@ -11028,7 +11028,7 @@ void CPhysicalGeometry::SetGlobalMarkerRoughness(const CConfig* config) {
   auto sizeLocal = static_cast<int>(nMarker_All); // number of local markers
 
   /*--- Communicate size of local marker array and make an array large enough to hold all data. ---*/
-  SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1, MPI_INT, MPI_COMM_WORLD);
+  SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1, MPI_INT, SU2_MPI::GetComm());
 
   /*--- Set the global array of displacements, needed to access the correct roughness element. ---*/
   GlobalMarkerStorageDispl.resize(size);
@@ -11050,5 +11050,5 @@ void CPhysicalGeometry::SetGlobalMarkerRoughness(const CConfig* config) {
   /*--- Finally, gather the roughness of all markers. ---*/
   SU2_MPI::Allgatherv(localRough.data(), sizeLocal, MPI_DOUBLE, GlobalRoughness_Height.data(),
-                      recvCounts.data(), GlobalMarkerStorageDispl.data(), MPI_DOUBLE, MPI_COMM_WORLD);
+                      recvCounts.data(), GlobalMarkerStorageDispl.data(), MPI_DOUBLE, SU2_MPI::GetComm());
 }
diff --git a/Common/src/geometry/meshreader/CCGNSMeshReaderFVM.cpp b/Common/src/geometry/meshreader/CCGNSMeshReaderFVM.cpp
index 1a4a7d409b33..c81244c54cd9 100644
--- a/Common/src/geometry/meshreader/CCGNSMeshReaderFVM.cpp
+++ b/Common/src/geometry/meshreader/CCGNSMeshReaderFVM.cpp
@@ -637,7 +637,7 @@ void CCGNSMeshReaderFVM::ReadCGNSVolumeSection(int val_section) {
    many cells it will receive from each other processor. ---*/
 
   SU2_MPI::Alltoall(&(nElem_Send[1]), 1, MPI_INT,
-                    &(nElem_Recv[1]), 1, MPI_INT, MPI_COMM_WORLD);
+                    &(nElem_Recv[1]), 1, MPI_INT, SU2_MPI::GetComm());
 
   /*--- Prepare to send connectivities. First check how many messages we will be sending and receiving. Here we also put
@@ -1127,37 +1127,37 @@ void CCGNSMeshReaderFVM::InitiateCommsAll(void *bufSend,
     switch (commType) {
       case COMM_TYPE_DOUBLE:
         SU2_MPI::Irecv(&(static_cast<su2double*>(bufRecv)[offset]),
-                       count, MPI_DOUBLE, source, tag, MPI_COMM_WORLD,
+                       count, MPI_DOUBLE, source, tag, SU2_MPI::GetComm(),
                        &(recvReq[iMessage]));
         break;
       case COMM_TYPE_UNSIGNED_LONG:
        SU2_MPI::Irecv(&(static_cast<unsigned long*>(bufRecv)[offset]),
-                       count, MPI_UNSIGNED_LONG, source, tag, MPI_COMM_WORLD,
+                       count, MPI_UNSIGNED_LONG, source, tag, SU2_MPI::GetComm(),
                        &(recvReq[iMessage]));
         break;
       case COMM_TYPE_LONG:
        SU2_MPI::Irecv(&(static_cast<long*>(bufRecv)[offset]),
-                       count, MPI_LONG, source, tag, MPI_COMM_WORLD,
+                       count, MPI_LONG, source, tag, SU2_MPI::GetComm(),
                        &(recvReq[iMessage]));
         break;
       case COMM_TYPE_UNSIGNED_SHORT:
        SU2_MPI::Irecv(&(static_cast<unsigned short*>(bufRecv)[offset]),
-                       count, MPI_UNSIGNED_SHORT, source, tag, MPI_COMM_WORLD,
+                       count, MPI_UNSIGNED_SHORT, source, tag, SU2_MPI::GetComm(),
                        &(recvReq[iMessage]));
         break;
       case COMM_TYPE_CHAR:
        SU2_MPI::Irecv(&(static_cast<char*>(bufRecv)[offset]),
-                       count, MPI_CHAR, source, tag, MPI_COMM_WORLD,
+                       count, MPI_CHAR, source, tag, SU2_MPI::GetComm(),
                        &(recvReq[iMessage]));
         break;
       case COMM_TYPE_SHORT:
        SU2_MPI::Irecv(&(static_cast<short*>(bufRecv)[offset]),
-                       count, MPI_SHORT, source, tag, MPI_COMM_WORLD,
+                       count, MPI_SHORT, source, tag, SU2_MPI::GetComm(),
                        &(recvReq[iMessage]));
         break;
       case COMM_TYPE_INT:
        SU2_MPI::Irecv(&(static_cast<int*>(bufRecv)[offset]),
-                       count, MPI_INT, source, tag, MPI_COMM_WORLD,
+                       count, MPI_INT, source, tag, SU2_MPI::GetComm(),
                        &(recvReq[iMessage]));
         break;
       default:
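The &(nElem_Send[1])/&(nElem_Recv[1]) idiom above exchanges per-rank counts into slot i+1 so both arrays can be turned into cumulative offsets in place, with slot 0 staying zero. A sketch of that bookkeeping under the same assumptions (arrays of size nRanks+1):

#include <mpi.h>
#include <vector>

void ExchangeCountsAndOffsets(std::vector<int>& nSend,
                              std::vector<int>& nRecv, MPI_Comm comm) {
  /*--- Counts live in entries 1..nRanks; entry 0 remains zero so that,
   after the partial sums below, entry i is the offset of rank i's block. ---*/
  MPI_Alltoall(&nSend[1], 1, MPI_INT, &nRecv[1], 1, MPI_INT, comm);
  for (int i = 1; i < (int)nSend.size(); ++i) {
    nSend[i] += nSend[i-1];
    nRecv[i] += nRecv[i-1];
  }
}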
@@ -1202,37 +1202,37 @@ void CCGNSMeshReaderFVM::InitiateCommsAll(void *bufSend,
     switch (commType) {
       case COMM_TYPE_DOUBLE:
         SU2_MPI::Isend(&(static_cast<su2double*>(bufSend)[offset]),
-                       count, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD,
+                       count, MPI_DOUBLE, dest, tag, SU2_MPI::GetComm(),
                        &(sendReq[iMessage]));
         break;
       case COMM_TYPE_UNSIGNED_LONG:
        SU2_MPI::Isend(&(static_cast<unsigned long*>(bufSend)[offset]),
-                       count, MPI_UNSIGNED_LONG, dest, tag, MPI_COMM_WORLD,
+                       count, MPI_UNSIGNED_LONG, dest, tag, SU2_MPI::GetComm(),
                        &(sendReq[iMessage]));
         break;
       case COMM_TYPE_LONG:
        SU2_MPI::Isend(&(static_cast<long*>(bufSend)[offset]),
-                       count, MPI_LONG, dest, tag, MPI_COMM_WORLD,
+                       count, MPI_LONG, dest, tag, SU2_MPI::GetComm(),
                        &(sendReq[iMessage]));
         break;
       case COMM_TYPE_UNSIGNED_SHORT:
        SU2_MPI::Isend(&(static_cast<unsigned short*>(bufSend)[offset]),
-                       count, MPI_UNSIGNED_SHORT, dest, tag, MPI_COMM_WORLD,
+                       count, MPI_UNSIGNED_SHORT, dest, tag, SU2_MPI::GetComm(),
                        &(sendReq[iMessage]));
         break;
       case COMM_TYPE_CHAR:
        SU2_MPI::Isend(&(static_cast<char*>(bufSend)[offset]),
-                       count, MPI_CHAR, dest, tag, MPI_COMM_WORLD,
+                       count, MPI_CHAR, dest, tag, SU2_MPI::GetComm(),
                        &(sendReq[iMessage]));
         break;
       case COMM_TYPE_SHORT:
        SU2_MPI::Isend(&(static_cast<short*>(bufSend)[offset]),
-                       count, MPI_SHORT, dest, tag, MPI_COMM_WORLD,
+                       count, MPI_SHORT, dest, tag, SU2_MPI::GetComm(),
                        &(sendReq[iMessage]));
         break;
       case COMM_TYPE_INT:
        SU2_MPI::Isend(&(static_cast<int*>(bufSend)[offset]),
-                       count, MPI_INT, dest, tag, MPI_COMM_WORLD,
+                       count, MPI_INT, dest, tag, SU2_MPI::GetComm(),
                        &(sendReq[iMessage]));
         break;
       default:
diff --git a/Common/src/graph_coloring_structure.cpp b/Common/src/graph_coloring_structure.cpp
index f60871e18164..e26f462659ad 100644
--- a/Common/src/graph_coloring_structure.cpp
+++ b/Common/src/graph_coloring_structure.cpp
@@ -46,8 +46,8 @@ void CGraphColoringStructure::GraphVertexColoring(
   int myRank = 0;
 
 #ifdef HAVE_MPI
-  SU2_MPI::Comm_rank(MPI_COMM_WORLD, &myRank);
-  SU2_MPI::Comm_size(MPI_COMM_WORLD, &nRank);
+  SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &myRank);
+  SU2_MPI::Comm_size(SU2_MPI::GetComm(), &nRank);
 #endif
 
   /*--- Determine the algorithm to use for the graph coloring. ---*/
@@ -81,7 +81,7 @@ void CGraphColoringStructure::GraphVertexColoring(
         /* Determine the size of the message to be received. */
         SU2_MPI::Status status;
-        SU2_MPI::Probe(rank, rank, MPI_COMM_WORLD, &status);
+        SU2_MPI::Probe(rank, rank, SU2_MPI::GetComm(), &status);
 
         int sizeMess;
         SU2_MPI::Get_count(&status, MPI_UNSIGNED_LONG, &sizeMess);
@@ -89,7 +89,7 @@ void CGraphColoringStructure::GraphVertexColoring(
         /* Allocate the memory for the receive buffer and receive the message. */
         vector<unsigned long> recvBuf(sizeMess);
         SU2_MPI::Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, rank, rank,
-                      MPI_COMM_WORLD, &status);
+                      SU2_MPI::GetComm(), &status);
 
         /* Store the data just received in the global vector for the graph. */
         unsigned long ii = 0;
@@ -195,7 +195,7 @@ void CGraphColoringStructure::GraphVertexColoring(
     for(int rank=1; rankSetnNonconvexElements(nNonconvexElements);
@@ -513,8 +513,8 @@ CVolumetricMovement::ComputeSolid_Wall_Distance(CGeometry *geometry, CConfi
   MinDistance_Local = MinDistance; MinDistance = 0.0;
 
 #ifdef HAVE_MPI
-  SU2_MPI::Allreduce(&MaxDistance_Local, &MaxDistance, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&MinDistance_Local, &MinDistance, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&MaxDistance_Local, &MaxDistance, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&MinDistance_Local, &MinDistance, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
 #else
   MaxDistance = MaxDistance_Local;
   MinDistance = MinDistance_Local;
diff --git a/Common/src/interface_interpolation/CInterpolator.cpp b/Common/src/interface_interpolation/CInterpolator.cpp
index 22b0f631454f..afb3c2fff17c 100644
--- a/Common/src/interface_interpolation/CInterpolator.cpp
+++ b/Common/src/interface_interpolation/CInterpolator.cpp
@@ -46,8 +46,8 @@ bool CInterpolator::CheckInterfaceBoundary(int markDonor, int markTarget) {
   /*--- Determine whether the boundary is not on the rank because of
    * the partition or because it is not part of the zone. ---*/
   int donorCheck = -1, targetCheck = -1;
-  SU2_MPI::Allreduce(&markDonor, &donorCheck, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&markTarget, &targetCheck, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&markDonor, &donorCheck, 1, MPI_INT, MPI_MAX, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&markTarget, &targetCheck, 1, MPI_INT, MPI_MAX, SU2_MPI::GetComm());
   return (donorCheck != -1) && (targetCheck != -1);
 }
@@ -74,9 +74,9 @@ void CInterpolator::Determine_ArraySize(int markDonor, int markTarget,
   Buffer_Send_nVertex_Donor[0] = nLocalVertex_Donor;
 
   /*--- Send Interface vertex information --*/
-  SU2_MPI::Allreduce(&nLocalVertex_Donor, &MaxLocalVertex_Donor, 1, MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&nLocalVertex_Donor, &MaxLocalVertex_Donor, 1, MPI_UNSIGNED_LONG, MPI_MAX, SU2_MPI::GetComm());
   SU2_MPI::Allgather(Buffer_Send_nVertex_Donor, 1, MPI_UNSIGNED_LONG,
-                     Buffer_Receive_nVertex_Donor, 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+                     Buffer_Receive_nVertex_Donor, 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
 }
@@ -105,9 +105,9 @@ void CInterpolator::Collect_VertexInfo(int markDonor, int markTarget,
   auto nBuffer_Point = MaxLocalVertex_Donor;
 
   SU2_MPI::Allgather(Buffer_Send_Coord, nBuffer_Coord, MPI_DOUBLE,
-                     Buffer_Receive_Coord, nBuffer_Coord, MPI_DOUBLE, MPI_COMM_WORLD);
+                     Buffer_Receive_Coord, nBuffer_Coord, MPI_DOUBLE, SU2_MPI::GetComm());
   SU2_MPI::Allgather(Buffer_Send_GlobalPoint, nBuffer_Point, MPI_LONG,
-                     Buffer_Receive_GlobalPoint, nBuffer_Point, MPI_LONG, MPI_COMM_WORLD);
+                     Buffer_Receive_GlobalPoint, nBuffer_Point, MPI_LONG, SU2_MPI::GetComm());
 }
@@ -120,7 +120,7 @@ unsigned long CInterpolator::Collect_ElementInfo(int markDonor, unsigned short n
   if (markDonor != -1) nElemDonor = donor_geometry->GetnElem_Bound(markDonor);
 
   allNumElem.resize(size);
-  SU2_MPI::Allgather(&nElemDonor, 1, MPI_UNSIGNED_LONG, allNumElem.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+  SU2_MPI::Allgather(&nElemDonor, 1, MPI_UNSIGNED_LONG, allNumElem.data(), 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
 
   auto nMaxElemDonor = *max_element(allNumElem.begin(), allNumElem.end());
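The graph-coloring receive above sizes its buffer with Probe plus Get_count before posting the matching Recv, which is the standard way to accept a message of unknown length. The same handshake in isolation:

#include <mpi.h>
#include <vector>

std::vector<unsigned long> RecvUnknownLength(int source, int tag, MPI_Comm comm) {
  MPI_Status status;
  MPI_Probe(source, tag, comm, &status);  /*--- block until the message is visible ---*/

  int sizeMess = 0;
  MPI_Get_count(&status, MPI_UNSIGNED_LONG, &sizeMess);

  std::vector<unsigned long> recvBuf(sizeMess);
  MPI_Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, source, tag, comm, &status);
  return recvBuf;
}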
@@ -144,9 +144,9 @@ unsigned long CInterpolator::Collect_ElementInfo(int markDonor, unsigned short n
   }
 
   SU2_MPI::Allgather(bufferSendNum.data(), bufferSendNum.size(), MPI_UNSIGNED_SHORT,
-                     numNodes.data(), bufferSendNum.size(), MPI_UNSIGNED_SHORT, MPI_COMM_WORLD);
+                     numNodes.data(), bufferSendNum.size(), MPI_UNSIGNED_SHORT, SU2_MPI::GetComm());
   SU2_MPI::Allgather(bufferSendIdx.data(), bufferSendIdx.size(), MPI_LONG,
-                     idxNodes.data(), bufferSendIdx.size(), MPI_LONG, MPI_COMM_WORLD);
+                     idxNodes.data(), bufferSendIdx.size(), MPI_LONG, SU2_MPI::GetComm());
 
   if (!compress)
     return accumulate(allNumElem.begin(), allNumElem.end(), 0ul);
@@ -275,8 +275,8 @@ void CInterpolator::ReconstructBoundary(unsigned long val_zone, int val_marker){
   /*--- Reconstruct boundary by gathering data from all ranks ---*/
 
-  SU2_MPI::Allreduce(     &nLocalVertex,      &nGlobalVertex, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&nLocalLinkedNodes, &nGlobalLinkedNodes, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(     &nLocalVertex,      &nGlobalVertex, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&nLocalLinkedNodes, &nGlobalLinkedNodes, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm());
 
   Buffer_Receive_Coord = new su2double [ nGlobalVertex * nDim ];
   Buffer_Receive_GlobalPoint = new long[ nGlobalVertex ];
@@ -307,15 +307,15 @@ void CInterpolator::ReconstructBoundary(unsigned long val_zone, int val_marker){
     for(iRank = 1; iRank < nProcessor; iRank++){
 
-      SU2_MPI::Recv(                        &iTmp2,     1, MPI_UNSIGNED_LONG, iRank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-      SU2_MPI::Recv(&Buffer_Receive_LinkedNodes[tmp_index_2], iTmp2, MPI_UNSIGNED_LONG, iRank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+      SU2_MPI::Recv(                        &iTmp2,     1, MPI_UNSIGNED_LONG, iRank, 0, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
+      SU2_MPI::Recv(&Buffer_Receive_LinkedNodes[tmp_index_2], iTmp2, MPI_UNSIGNED_LONG, iRank, 1, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
 
-      SU2_MPI::Recv(                    &iTmp,         1, MPI_UNSIGNED_LONG, iRank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-      SU2_MPI::Recv(&Buffer_Receive_Coord[tmp_index*nDim], nDim*iTmp, MPI_DOUBLE, iRank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+      SU2_MPI::Recv(                    &iTmp,         1, MPI_UNSIGNED_LONG, iRank, 0, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
+      SU2_MPI::Recv(&Buffer_Receive_Coord[tmp_index*nDim], nDim*iTmp, MPI_DOUBLE, iRank, 1, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
 
-      SU2_MPI::Recv(     &Buffer_Receive_GlobalPoint[tmp_index], iTmp, MPI_LONG, iRank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-      SU2_MPI::Recv(    &Buffer_Receive_nLinkedNodes[tmp_index], iTmp, MPI_UNSIGNED_LONG, iRank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-      SU2_MPI::Recv(&Buffer_Receive_StartLinkedNodes[tmp_index], iTmp, MPI_UNSIGNED_LONG, iRank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+      SU2_MPI::Recv(     &Buffer_Receive_GlobalPoint[tmp_index], iTmp, MPI_LONG, iRank, 1, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
+      SU2_MPI::Recv(    &Buffer_Receive_nLinkedNodes[tmp_index], iTmp, MPI_UNSIGNED_LONG, iRank, 1, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
+      SU2_MPI::Recv(&Buffer_Receive_StartLinkedNodes[tmp_index], iTmp, MPI_UNSIGNED_LONG, iRank, 1, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
 
       for (iVertex = 0; iVertex < iTmp; iVertex++){
         Buffer_Receive_Proc[ tmp_index + iVertex ] = iRank;
@@ -327,15 +327,15 @@ void CInterpolator::ReconstructBoundary(unsigned long val_zone, int val_marker){
   }
   else{
-    SU2_MPI::Send(     &nLocalLinkedNodes,                 1, MPI_UNSIGNED_LONG, 0, 0, MPI_COMM_WORLD);
-    SU2_MPI::Send(Buffer_Send_LinkedNodes, nLocalLinkedNodes, MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD);
+    SU2_MPI::Send(     &nLocalLinkedNodes,                 1, MPI_UNSIGNED_LONG, 0, 0, SU2_MPI::GetComm());
+    SU2_MPI::Send(Buffer_Send_LinkedNodes, nLocalLinkedNodes, MPI_UNSIGNED_LONG, 0, 1, SU2_MPI::GetComm());
 
-    SU2_MPI::Send(    &nLocalVertex,                  1, MPI_UNSIGNED_LONG, 0, 0, MPI_COMM_WORLD);
-    SU2_MPI::Send(Buffer_Send_Coord, nDim * nLocalVertex, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD);
+    SU2_MPI::Send(    &nLocalVertex,                  1, MPI_UNSIGNED_LONG, 0, 0, SU2_MPI::GetComm());
+    SU2_MPI::Send(Buffer_Send_Coord, nDim * nLocalVertex, MPI_DOUBLE, 0, 1, SU2_MPI::GetComm());
 
-    SU2_MPI::Send(     Buffer_Send_GlobalPoint, nLocalVertex, MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD);
-    SU2_MPI::Send(    Buffer_Send_nLinkedNodes, nLocalVertex, MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD);
-    SU2_MPI::Send(Buffer_Send_StartLinkedNodes, nLocalVertex, MPI_UNSIGNED_LONG, 0, 1, MPI_COMM_WORLD);
+    SU2_MPI::Send(     Buffer_Send_GlobalPoint, nLocalVertex, MPI_UNSIGNED_LONG, 0, 1, SU2_MPI::GetComm());
+    SU2_MPI::Send(    Buffer_Send_nLinkedNodes, nLocalVertex, MPI_UNSIGNED_LONG, 0, 1, SU2_MPI::GetComm());
+    SU2_MPI::Send(Buffer_Send_StartLinkedNodes, nLocalVertex, MPI_UNSIGNED_LONG, 0, 1, SU2_MPI::GetComm());
   }
 #else
   for (iVertex = 0; iVertex < nDim * nGlobalVertex; iVertex++)
@@ -378,13 +378,13 @@ void CInterpolator::ReconstructBoundary(unsigned long val_zone, int val_marker){
       }
     }
 
-  SU2_MPI::Bcast(Buffer_Receive_GlobalPoint, nGlobalVertex, MPI_LONG, 0, MPI_COMM_WORLD);
-  SU2_MPI::Bcast(Buffer_Receive_Coord, nGlobalVertex*nDim, MPI_DOUBLE, 0, MPI_COMM_WORLD);
-  SU2_MPI::Bcast(Buffer_Receive_Proc, nGlobalVertex, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
+  SU2_MPI::Bcast(Buffer_Receive_GlobalPoint, nGlobalVertex, MPI_LONG, 0, SU2_MPI::GetComm());
+  SU2_MPI::Bcast(Buffer_Receive_Coord, nGlobalVertex*nDim, MPI_DOUBLE, 0, SU2_MPI::GetComm());
+  SU2_MPI::Bcast(Buffer_Receive_Proc, nGlobalVertex, MPI_UNSIGNED_LONG, 0, SU2_MPI::GetComm());
 
-  SU2_MPI::Bcast(Buffer_Receive_nLinkedNodes, nGlobalVertex, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
-  SU2_MPI::Bcast(Buffer_Receive_StartLinkedNodes, nGlobalVertex, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
-  SU2_MPI::Bcast(Buffer_Receive_LinkedNodes, nGlobalLinkedNodes, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
+  SU2_MPI::Bcast(Buffer_Receive_nLinkedNodes, nGlobalVertex, MPI_UNSIGNED_LONG, 0, SU2_MPI::GetComm());
+  SU2_MPI::Bcast(Buffer_Receive_StartLinkedNodes, nGlobalVertex, MPI_UNSIGNED_LONG, 0, SU2_MPI::GetComm());
+  SU2_MPI::Bcast(Buffer_Receive_LinkedNodes, nGlobalLinkedNodes, MPI_UNSIGNED_LONG, 0, SU2_MPI::GetComm());
 
   delete [] Buffer_Send_Coord;       Buffer_Send_Coord = nullptr;
   delete [] Buffer_Send_GlobalPoint; Buffer_Send_GlobalPoint = nullptr;
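ReconstructBoundary above funnels variable-sized blocks to rank 0 with paired sends (size first on one tag, payload next on another) and later re-broadcasts the assembled arrays. A compact sketch of that size-then-payload handshake, simplified to one array:

#include <mpi.h>
#include <vector>

void GatherBlocksOnMaster(const std::vector<double>& myBlock,
                          std::vector<double>& assembled, MPI_Comm comm) {
  int rank = 0, nRanks = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &nRanks);

  if (rank == 0) {
    assembled = myBlock;
    for (int iRank = 1; iRank < nRanks; ++iRank) {
      unsigned long n = 0;  /*--- size arrives on tag 0, payload on tag 1 ---*/
      MPI_Recv(&n, 1, MPI_UNSIGNED_LONG, iRank, 0, comm, MPI_STATUS_IGNORE);
      std::vector<double> buf(n);
      MPI_Recv(buf.data(), (int)n, MPI_DOUBLE, iRank, 1, comm, MPI_STATUS_IGNORE);
      assembled.insert(assembled.end(), buf.begin(), buf.end());
    }
  } else {
    unsigned long n = myBlock.size();
    MPI_Send(&n, 1, MPI_UNSIGNED_LONG, 0, 0, comm);
    MPI_Send(myBlock.data(), (int)n, MPI_DOUBLE, 0, 1, comm);
  }
}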
diff --git a/Common/src/interface_interpolation/CIsoparametric.cpp b/Common/src/interface_interpolation/CIsoparametric.cpp
index 590c32bf7bc6..92e5aef651fe 100644
--- a/Common/src/interface_interpolation/CIsoparametric.cpp
+++ b/Common/src/interface_interpolation/CIsoparametric.cpp
@@ -266,9 +266,9 @@ void CIsoparametric::SetTransferCoeff(const CConfig* const* config) {
   /*--- Final reduction of statistics. ---*/
   su2double tmp = MaxDistance;
   unsigned long tmp1 = ErrorCounter, tmp2 = nGlobalVertexTarget;
-  SU2_MPI::Allreduce(&tmp, &MaxDistance, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&tmp1, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&tmp2, &nGlobalVertexTarget, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&tmp, &MaxDistance, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&tmp1, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&tmp2, &nGlobalVertexTarget, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm());
 
   ErrorRate = 100*su2double(ErrorCounter) / nGlobalVertexTarget;
diff --git a/Common/src/interface_interpolation/CMirror.cpp b/Common/src/interface_interpolation/CMirror.cpp
index 502ae2714009..ed42afb51ff5 100644
--- a/Common/src/interface_interpolation/CMirror.cpp
+++ b/Common/src/interface_interpolation/CMirror.cpp
@@ -93,11 +93,11 @@ void CMirror::SetTransferCoeff(const CConfig* const* config) {
   /*--- Communicate vertex and donor node counts. ---*/
   SU2_MPI::Allgather(&nVertexTarget, 1, MPI_UNSIGNED_LONG,
-                     allNumVertexTarget.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+                     allNumVertexTarget.data(), 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
   SU2_MPI::Allgather(&nVertexDonorLocal, 1, MPI_UNSIGNED_LONG,
-                     allNumVertexDonor.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+                     allNumVertexDonor.data(), 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
   SU2_MPI::Allgather(&nNodeDonorLocal, 1, MPI_UNSIGNED_LONG,
-                     allNumNodeDonor.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+                     allNumNodeDonor.data(), 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
 
   /*--- Copy donor interpolation matrix (triplet format). ---*/
   vector<long> sendGlobalIndex(nNodeDonorLocal);
@@ -175,15 +175,15 @@ void CMirror::SetTransferCoeff(const CConfig* const* config) {
         GlobalIndex[iSend] = new long [numCoeff];
         DonorIndex[iSend] = new long [numCoeff];
         DonorCoeff[iSend] = new su2double [numCoeff];
-        SU2_MPI::Recv(GlobalIndex[iSend], numCoeff, MPI_LONG, jProcessor, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-        SU2_MPI::Recv(DonorIndex[iSend], numCoeff, MPI_LONG, jProcessor, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-        SU2_MPI::Recv(DonorCoeff[iSend], numCoeff, MPI_DOUBLE, jProcessor, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+        SU2_MPI::Recv(GlobalIndex[iSend], numCoeff, MPI_LONG, jProcessor, 0, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
+        SU2_MPI::Recv(DonorIndex[iSend], numCoeff, MPI_LONG, jProcessor, 0, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
+        SU2_MPI::Recv(DonorCoeff[iSend], numCoeff, MPI_DOUBLE, jProcessor, 0, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
       }
       else if (rank == jProcessor) {
         /*--- "I'm" the donor, send. ---*/
-        SU2_MPI::Send(sendGlobalIndex.data(), numCoeff, MPI_LONG, iProcessor, 0, MPI_COMM_WORLD);
-        SU2_MPI::Send(sendDonorIndex.data(), numCoeff, MPI_LONG, iProcessor, 0, MPI_COMM_WORLD);
-        SU2_MPI::Send(sendDonorCoeff.data(), numCoeff, MPI_DOUBLE, iProcessor, 0, MPI_COMM_WORLD);
+        SU2_MPI::Send(sendGlobalIndex.data(), numCoeff, MPI_LONG, iProcessor, 0, SU2_MPI::GetComm());
+        SU2_MPI::Send(sendDonorIndex.data(), numCoeff, MPI_LONG, iProcessor, 0, SU2_MPI::GetComm());
+        SU2_MPI::Send(sendDonorCoeff.data(), numCoeff, MPI_DOUBLE, iProcessor, 0, SU2_MPI::GetComm());
       }
     }
   }
diff --git a/Common/src/interface_interpolation/CNearestNeighbor.cpp b/Common/src/interface_interpolation/CNearestNeighbor.cpp
index e1de1ff93021..c4cdd7830876 100644
--- a/Common/src/interface_interpolation/CNearestNeighbor.cpp
+++ b/Common/src/interface_interpolation/CNearestNeighbor.cpp
@@ -177,10 +177,10 @@ void CNearestNeighbor::SetTransferCoeff(const CConfig* const* config) {
   delete[] Buffer_Receive_nVertex_Donor;
 
   unsigned long tmp = totalTargetPoints;
-  SU2_MPI::Allreduce(&tmp, &totalTargetPoints, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&tmp, &totalTargetPoints, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm());
   su2double tmp1 = AvgDistance, tmp2 = MaxDistance;
-  SU2_MPI::Allreduce(&tmp1, &AvgDistance, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&tmp2, &MaxDistance, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&tmp1, &AvgDistance, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&tmp2, &MaxDistance, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
   AvgDistance /= totalTargetPoints;
 }
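The nearest-neighbor statistics above reduce the distance sum and the point count separately and divide only once, after the reductions, so the global average is exact regardless of how points are distributed across ranks. The same pattern in isolation:

#include <mpi.h>

double GlobalAverage(double localSum, unsigned long localCount, MPI_Comm comm) {
  double globalSum = 0.0;
  unsigned long globalCount = 0;
  MPI_Allreduce(&localSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, comm);
  MPI_Allreduce(&localCount, &globalCount, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
  /*--- Divide once at the end; averaging per rank first would bias the result. ---*/
  return (globalCount > 0) ? globalSum / globalCount : 0.0;
}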
diff --git a/Common/src/interface_interpolation/CRadialBasisFunction.cpp b/Common/src/interface_interpolation/CRadialBasisFunction.cpp
index fcbf242537af..82bd1ebef0d0 100644
--- a/Common/src/interface_interpolation/CRadialBasisFunction.cpp
+++ b/Common/src/interface_interpolation/CRadialBasisFunction.cpp
@@ -250,25 +250,25 @@ void CRadialBasisFunction::SetTransferCoeff(const CConfig* const* config) {
 #ifdef HAVE_MPI
     /*--- For simplicity, broadcast small information about the interpolation matrix. ---*/
-    SU2_MPI::Bcast(&nPolynomial, 1, MPI_INT, iProcessor, MPI_COMM_WORLD);
-    SU2_MPI::Bcast(keepPolynomialRow.data(), nDim, MPI_INT, iProcessor, MPI_COMM_WORLD);
+    SU2_MPI::Bcast(&nPolynomial, 1, MPI_INT, iProcessor, SU2_MPI::GetComm());
+    SU2_MPI::Bcast(keepPolynomialRow.data(), nDim, MPI_INT, iProcessor, SU2_MPI::GetComm());
 
     /*--- Send C_inv_trunc only to the ranks that need it (those with target points),
      * partial broadcast. MPI wrapper not used due to passive double. ---*/
     vector<unsigned long> allNumVertex(nProcessor);
     SU2_MPI::Allgather(&nVertexTarget, 1, MPI_UNSIGNED_LONG,
-                       allNumVertex.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+                       allNumVertex.data(), 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
 
     if (rank == iProcessor) {
       for (int jProcessor = 0; jProcessor < nProcessor; ++jProcessor)
         if ((jProcessor != iProcessor) && (allNumVertex[jProcessor] != 0))
           MPI_Send(C_inv_trunc.data(), C_inv_trunc.size(),
-                   MPI_DOUBLE, jProcessor, 0, MPI_COMM_WORLD);
+                   MPI_DOUBLE, jProcessor, 0, SU2_MPI::GetComm());
     }
     else if (nVertexTarget != 0) {
       C_inv_trunc.resize(1+nPolynomial+nGlobalVertexDonor, nGlobalVertexDonor);
       MPI_Recv(C_inv_trunc.data(), C_inv_trunc.size(), MPI_DOUBLE,
-               iProcessor, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+               iProcessor, 0, SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
     }
 #endif
@@ -403,7 +403,7 @@ void CRadialBasisFunction::SetTransferCoeff(const CConfig* const* config) {
   /*--- Final reduction of interpolation statistics and basic sanity checks. ---*/
   auto Reduce = [](SU2_MPI::Op op, unsigned long &val) {
     auto tmp = val;
-    SU2_MPI::Allreduce(&tmp, &val, 1, MPI_UNSIGNED_LONG, op, MPI_COMM_WORLD);
+    SU2_MPI::Allreduce(&tmp, &val, 1, MPI_UNSIGNED_LONG, op, SU2_MPI::GetComm());
   };
   Reduce(MPI_SUM, totalTargetPoints);
   Reduce(MPI_SUM, totalDonorPoints);
@@ -412,8 +412,8 @@ void CRadialBasisFunction::SetTransferCoeff(const CConfig* const* config) {
   Reduce(MPI_MAX, MaxDonors);
 #ifdef HAVE_MPI
   passivedouble tmp1 = AvgCorrection, tmp2 = MaxCorrection;
-  MPI_Allreduce(&tmp1, &AvgCorrection, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
-  MPI_Allreduce(&tmp2, &MaxCorrection, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+  MPI_Allreduce(&tmp1, &AvgCorrection, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
+  MPI_Allreduce(&tmp2, &MaxCorrection, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
 #endif
   if (totalTargetPoints == 0)
     SU2_MPI::Error("Somehow there are no target interpolation points.", CURRENT_FUNCTION);
diff --git a/Common/src/linear_algebra/CPastixWrapper.cpp b/Common/src/linear_algebra/CPastixWrapper.cpp
index 9a1c67d86e36..08e115138e60 100644
--- a/Common/src/linear_algebra/CPastixWrapper.cpp
+++ b/Common/src/linear_algebra/CPastixWrapper.cpp
@@ -113,7 +113,7 @@ void CPastixWrapper::Initialize(CGeometry *geometry, const CConfig *
 #ifdef HAVE_MPI
   vector<unsigned long> domain_sizes(mpi_size);
-  MPI_Allgather(&nPointDomain, 1, MPI_UNSIGNED_LONG, domain_sizes.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+  MPI_Allgather(&nPointDomain, 1, MPI_UNSIGNED_LONG, domain_sizes.data(), 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
  for (int i=0; i::Initialize(CGeometry *geometry, const CConfig *
   /*--- Send and Receive data ---*/
   MPI_Sendrecv(Buffer_Send.data(), nVertexS, MPI_UNSIGNED_LONG, sender, 0,
                Buffer_Recv.data(), nVertexR, MPI_UNSIGNED_LONG, recver, 0,
-               MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+               SU2_MPI::GetComm(), MPI_STATUS_IGNORE);
 
   /*--- Store received data---*/
   for (unsigned long iVertex = 0; iVertex < nVertexR; iVertex++)
diff --git a/Common/src/linear_algebra/CSysMatrix.cpp b/Common/src/linear_algebra/CSysMatrix.cpp
index dd6aaae8f620..1f74bbdee210 100644
--- a/Common/src/linear_algebra/CSysMatrix.cpp
+++ b/Common/src/linear_algebra/CSysMatrix.cpp
@@ -1112,8 +1112,8 @@ unsigned long CSysMatrix::BuildLineletPreconditioner(CGeometry *geom
   }
   Local_nLineLets = nLinelet;
 
-  SU2_MPI::Allreduce(&Local_nPoints, &Global_nPoints, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&Local_nLineLets, &Global_nLineLets, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
+  SU2_MPI::Allreduce(&Local_nPoints, &Global_nPoints, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&Local_nLineLets, &Global_nLineLets, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm());
 
   /*--- Memory allocation --*/
diff --git a/SU2_CFD/include/limiters/CLimiterDetails.hpp b/SU2_CFD/include/limiters/CLimiterDetails.hpp
index d90c54fee50c..7c9dbb1fe62d 100644
--- a/SU2_CFD/include/limiters/CLimiterDetails.hpp
+++ b/SU2_CFD/include/limiters/CLimiterDetails.hpp
@@ -210,10 +210,10 @@ struct CLimiterDetails
     SU2_OMP_MASTER
     {
       localMin = sharedMin;
-      SU2_MPI::Allreduce(localMin.data(), sharedMin.data(), varEnd, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
+      SU2_MPI::Allreduce(localMin.data(), sharedMin.data(), varEnd, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm());
 
       localMax = sharedMax;
-      SU2_MPI::Allreduce(localMax.data(), sharedMax.data(), varEnd, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+      SU2_MPI::Allreduce(localMax.data(), sharedMax.data(), varEnd, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm());
     }
     SU2_OMP_BARRIER
diff --git a/SU2_CFD/include/solvers/CFVMFlowSolverBase.inl b/SU2_CFD/include/solvers/CFVMFlowSolverBase.inl
index 8cfcdcd299b3..7ccdd2c07070 100644
--- a/SU2_CFD/include/solvers/CFVMFlowSolverBase.inl
+++ b/SU2_CFD/include/solvers/CFVMFlowSolverBase.inl
@@ -347,10 +347,10 @@ void CFVMFlowSolverBase::HybridParallelInitialization(const CConfig& confi
   /*--- If the reducer strategy is not being forced (by EDGE_COLORING_GROUP_SIZE=0) print some messages. ---*/
   if (config.GetEdgeColoringGroupSize() != 1 << 30) {
     su2double minEff = 1.0;
-    SU2_MPI::Reduce(&parallelEff, &minEff, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD);
+    SU2_MPI::Reduce(&parallelEff, &minEff, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm());
 
     int tmp = ReducerStrategy, numRanksUsingReducer = 0;
-    SU2_MPI::Reduce(&tmp, &numRanksUsingReducer, 1, MPI_INT, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD);
+    SU2_MPI::Reduce(&tmp, &numRanksUsingReducer, 1, MPI_INT, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm());
 
     if (minEff < COLORING_EFF_THRESH) {
       cout << "WARNING: On " << numRanksUsingReducer << " MPI ranks the coloring efficiency was less than "
@@ -1599,7 +1599,7 @@ void CFVMFlowSolverBase::Pressure_Forces(const CGeometry* geometr
   auto Allreduce = [](su2double x) {
     su2double tmp = x; x = 0.0;
-    SU2_MPI::Allreduce(&tmp, &x, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+    SU2_MPI::Allreduce(&tmp, &x, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
     return x;
   };
   AllBoundInvCoeff.CD = Allreduce(AllBoundInvCoeff.CD);
@@ -1636,7 +1636,7 @@ void CFVMFlowSolverBase::Pressure_Forces(const CGeometry* geometr
   su2double* buffer = new su2double[nMarkerMon];
 
   auto Allreduce_inplace = [buffer](int size, su2double* x) {
-    SU2_MPI::Allreduce(x, buffer, size, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+    SU2_MPI::Allreduce(x, buffer, size, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
     for (int i = 0; i < size; ++i) x[i] = buffer[i];
   };
@@ -1920,7 +1920,7 @@ void CFVMFlowSolverBase::Momentum_Forces(const CGeometry* geometr
   auto Allreduce = [](su2double x) {
     su2double tmp = x; x = 0.0;
-    SU2_MPI::Allreduce(&tmp, &x, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+    SU2_MPI::Allreduce(&tmp, &x, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
     return x;
   };
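The Allreduce_inplace lambdas above reduce whole coefficient arrays through one preallocated scratch buffer so several arrays can be processed without a fresh allocation per call. The same idea as a standalone helper:

#include <mpi.h>
#include <vector>

void AllreduceInplace(double* x, int n, std::vector<double>& scratch, MPI_Comm comm) {
  if ((int)scratch.size() < n) scratch.resize(n);
  MPI_Allreduce(x, scratch.data(), n, MPI_DOUBLE, MPI_SUM, comm);
  for (int i = 0; i < n; ++i) x[i] = scratch[i];  /*--- copy the result back in place ---*/
}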
@@ -1957,7 +1957,7 @@ void CFVMFlowSolverBase::Momentum_Forces(const CGeometry* geometr
   su2double* buffer = new su2double[nMarkerMon];
 
   auto Allreduce_inplace = [buffer](int size, su2double* x) {
-    SU2_MPI::Allreduce(x, buffer, size, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+    SU2_MPI::Allreduce(x, buffer, size, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
     for (int i = 0; i < size; ++i) x[i] = buffer[i];
   };
@@ -2384,7 +2384,7 @@ void CFVMFlowSolverBase::Friction_Forces(const CGeometry* geometr
   auto Allreduce = [](su2double x) {
     su2double tmp = x; x = 0.0;
-    SU2_MPI::Allreduce(&tmp, &x, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+    SU2_MPI::Allreduce(&tmp, &x, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
     return x;
   };
   AllBoundViscCoeff.CD = Allreduce(AllBoundViscCoeff.CD);
@@ -2423,7 +2423,7 @@ void CFVMFlowSolverBase::Friction_Forces(const CGeometry* geometr
   su2double* buffer = new su2double[nMarkerMon];
 
   auto Allreduce_inplace = [buffer](int size, su2double* x) {
-    SU2_MPI::Allreduce(x, buffer, size, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+    SU2_MPI::Allreduce(x, buffer, size, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
     for (int i = 0; i < size; ++i) x[i] = buffer[i];
   };
diff --git a/SU2_CFD/src/CMarkerProfileReaderFVM.cpp b/SU2_CFD/src/CMarkerProfileReaderFVM.cpp
index 05a85782420c..dc12ddaf30df 100644
--- a/SU2_CFD/src/CMarkerProfileReaderFVM.cpp
+++ b/SU2_CFD/src/CMarkerProfileReaderFVM.cpp
@@ -208,8 +208,8 @@ void CMarkerProfileReaderFVM::MergeProfileMarkers() {
   /*--- Communicate the total number of nodes on this domain. ---*/
 
   SU2_MPI::Gather(&Buffer_Send_nPoin, 1, MPI_UNSIGNED_LONG,
-                  Buffer_Recv_nPoin, 1, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD);
-  SU2_MPI::Allreduce(&nLocalPoint, &MaxLocalPoint, 1, MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD);
+                  Buffer_Recv_nPoin, 1, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm());
+  SU2_MPI::Allreduce(&nLocalPoint, &MaxLocalPoint, 1, MPI_UNSIGNED_LONG, MPI_MAX, SU2_MPI::GetComm());
 
   /*--- Send and Recv buffers. ---*/
@@ -300,15 +300,15 @@ void CMarkerProfileReaderFVM::MergeProfileMarkers() {
     /*--- Gather the coordinate data on the master node using MPI. ---*/
 
     SU2_MPI::Gather(Buffer_Send_X, (int)MaxLocalPoint, MPI_DOUBLE,
-                    Buffer_Recv_X, (int)MaxLocalPoint, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
+                    Buffer_Recv_X, (int)MaxLocalPoint, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
     SU2_MPI::Gather(Buffer_Send_Y, (int)MaxLocalPoint, MPI_DOUBLE,
-                    Buffer_Recv_Y, (int)MaxLocalPoint, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
+                    Buffer_Recv_Y, (int)MaxLocalPoint, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
     if (dimension == 3) {
       SU2_MPI::Gather(Buffer_Send_Z, (int)MaxLocalPoint, MPI_DOUBLE,
-                      Buffer_Recv_Z, (int)MaxLocalPoint, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
+                      Buffer_Recv_Z, (int)MaxLocalPoint, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
     }
     SU2_MPI::Gather(Buffer_Send_Str, (int)MaxLocalPoint*MAX_STRING_SIZE, MPI_CHAR,
-                    Buffer_Recv_Str, (int)MaxLocalPoint*MAX_STRING_SIZE, MPI_CHAR, MASTER_NODE, MPI_COMM_WORLD);
+                    Buffer_Recv_Str, (int)MaxLocalPoint*MAX_STRING_SIZE, MPI_CHAR, MASTER_NODE, SU2_MPI::GetComm());
 
     /*--- The master node unpacks and sorts this variable by marker tag. ---*/
diff --git a/SU2_CFD/src/definition_structure.cpp b/SU2_CFD/src/definition_structure.cpp
index f37d74a9db02..b0a4b0d2d54b 100644
--- a/SU2_CFD/src/definition_structure.cpp
+++ b/SU2_CFD/src/definition_structure.cpp
@@ -46,8 +46,8 @@ void Partition_Analysis(CGeometry *geometry, CConfig *config) {
   int size = SINGLE_NODE;
 
 #ifdef HAVE_MPI
-  SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
-  SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
+  SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
+  SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);
 #endif
 
   nPointTotal = geometry->GetnPoint();
@@ -119,7 +119,7 @@ void Partition_Analysis(CGeometry *geometry, CConfig *config) {
     Profile_File << "\"Rank\", \"nNeighbors\", \"nPointTotal\", \"nEdge\", \"nPointGhost\", \"nSendTotal\", \"nRecvTotal\", \"nElemTotal\", \"nElemBoundary\", \"nElemHalo\", \"nnz\"" << endl;
     Profile_File.close();
   }
-  SU2_MPI::Barrier(MPI_COMM_WORLD);
+  SU2_MPI::Barrier(SU2_MPI::GetComm());
 
   /*--- Loop through the map and write the results to the file ---*/
@@ -129,7 +129,7 @@ void Partition_Analysis(CGeometry *geometry, CConfig *config) {
       Profile_File << rank << ", " << nNeighbors << ", " << nPointTotal << ", " << nEdge << "," << nPointGhost << ", " << nSendTotal << ", " << nRecvTotal << ", " << nElemTotal << "," << nElemBound << ", " << nElemHalo << ", " << nnz << endl;
       Profile_File.close();
     }
-    SU2_MPI::Barrier(MPI_COMM_WORLD);
+    SU2_MPI::Barrier(SU2_MPI::GetComm());
   }
 
   delete [] isHalo;
@@ -151,8 +151,8 @@ void Partition_Analysis_FEM(CGeometry *geometry, CConfig *config) {
   int size = SINGLE_NODE;
 
 #ifdef HAVE_MPI
-  SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
-  SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
+  SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
+  SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);
 #endif
 
   /*--- Create an object of the class CMeshFEM_DG and retrieve the necessary
@@ -218,7 +218,7 @@ void Partition_Analysis_FEM(CGeometry *geometry, CConfig *config) {
     Profile_File << "\"Rank\", \"nNeighSend\", \"nNeighRecv\", \"nElemOwned\", \"nElemSendTotal\", \"nElemRecvTotal\", \"nDOFOwned\", \"nDOFSendTotal\", \"nDOFRecvTotal\"" << endl;
     Profile_File.close();
   }
-  SU2_MPI::Barrier(MPI_COMM_WORLD);
+  SU2_MPI::Barrier(SU2_MPI::GetComm());
 
   /*--- Loop through the map and write the results to the file ---*/
@@ -230,7 +230,7 @@ void Partition_Analysis_FEM(CGeometry *geometry, CConfig *config) {
                    << nDOFSendTotal << ", " << nDOFRecvTotal << endl;
       Profile_File.close();
     }
-    SU2_MPI::Barrier(MPI_COMM_WORLD);
+    SU2_MPI::Barrier(SU2_MPI::GetComm());
   }
 }
diff --git a/SU2_CFD/src/drivers/CDiscAdjMultizoneDriver.cpp b/SU2_CFD/src/drivers/CDiscAdjMultizoneDriver.cpp
index 33eab6df187c..241b39068382 100644
--- a/SU2_CFD/src/drivers/CDiscAdjMultizoneDriver.cpp
+++ b/SU2_CFD/src/drivers/CDiscAdjMultizoneDriver.cpp
@@ -569,7 +569,7 @@ void CDiscAdjMultizoneDriver::SetRecording(unsigned short kind_recording, Kind_T
 #ifdef CODI_REVERSE_TYPE
   if (size > SINGLE_NODE) {
     su2double myMem = AD::globalTape.getTapeValues().getUsedMemorySize(), totMem = 0.0;
-    SU2_MPI::Allreduce(&myMem, &totMem, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+    SU2_MPI::Allreduce(&myMem, &totMem, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
     if (rank == MASTER_NODE) {
       cout << "MPI\n";
       cout << "-------------------------------------\n";
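The CODI blocks above sum the per-rank tape memory so only the master prints a total; with the wrapper communicator the figure stays correct when SU2 owns just a sub-communicator. A sketch of the reduce-and-report step (hypothetical formatting, not SU2's output):

#include <mpi.h>
#include <cstdio>

void ReportTotalMemory(double myMemBytes, MPI_Comm comm) {
  double totMem = 0.0;
  int rank = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Allreduce(&myMemBytes, &totMem, 1, MPI_DOUBLE, MPI_SUM, comm);
  if (rank == 0) std::printf("Total tape memory: %.1f MB\n", totMem / 1048576.0);
}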
diff --git a/SU2_CFD/src/drivers/CDiscAdjSinglezoneDriver.cpp b/SU2_CFD/src/drivers/CDiscAdjSinglezoneDriver.cpp
index 8cc5edd4437b..48a9463e00db 100644
--- a/SU2_CFD/src/drivers/CDiscAdjSinglezoneDriver.cpp
+++ b/SU2_CFD/src/drivers/CDiscAdjSinglezoneDriver.cpp
@@ -296,7 +296,7 @@ void CDiscAdjSinglezoneDriver::SetRecording(unsigned short kind_recording){
 #ifdef CODI_REVERSE_TYPE
   if (size > SINGLE_NODE) {
     su2double myMem = AD::globalTape.getTapeValues().getUsedMemorySize(), totMem = 0.0;
-    SU2_MPI::Allreduce(&myMem, &totMem, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+    SU2_MPI::Allreduce(&myMem, &totMem, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm());
     if (rank == MASTER_NODE) {
       cout << "MPI\n";
       cout << "-------------------------------------\n";
diff --git a/SU2_CFD/src/drivers/CMultizoneDriver.cpp b/SU2_CFD/src/drivers/CMultizoneDriver.cpp
index e1e14188a4de..68019153cdbe 100644
--- a/SU2_CFD/src/drivers/CMultizoneDriver.cpp
+++ b/SU2_CFD/src/drivers/CMultizoneDriver.cpp
@@ -264,7 +264,7 @@ void CMultizoneDriver::Preprocess(unsigned long TimeIter) {
   }
 
 #ifdef HAVE_MPI
-  SU2_MPI::Barrier(MPI_COMM_WORLD);
+  SU2_MPI::Barrier(SU2_MPI::GetComm());
 #endif
 
   /*--- Run a predictor step ---*/
diff --git a/SU2_CFD/src/drivers/CSinglezoneDriver.cpp b/SU2_CFD/src/drivers/CSinglezoneDriver.cpp
index 30fe1077d670..0729a2c1458f 100644
--- a/SU2_CFD/src/drivers/CSinglezoneDriver.cpp
+++ b/SU2_CFD/src/drivers/CSinglezoneDriver.cpp
@@ -132,7 +132,7 @@ void CSinglezoneDriver::Preprocess(unsigned long TimeIter) {
   }
 
 #ifdef HAVE_MPI
-  SU2_MPI::Barrier(MPI_COMM_WORLD);
+  SU2_MPI::Barrier(SU2_MPI::GetComm());
 #endif
 
   /*--- Run a predictor step ---*/
diff --git a/SU2_CFD/src/integration/CIntegration.cpp b/SU2_CFD/src/integration/CIntegration.cpp
index 678361fb7bf4..811b1b5608cd 100644
--- a/SU2_CFD/src/integration/CIntegration.cpp
+++ b/SU2_CFD/src/integration/CIntegration.cpp
@@ -283,9 +283,9 @@ void CIntegration::SetDualTime_Solver(CGeometry *geometry, CSolver *solver, CCon
     /*--- Gather the data on the master node. ---*/
 
-    SU2_MPI::Gather(&plunge, 1, MPI_DOUBLE, plunge_all, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
-    SU2_MPI::Gather(&pitch, 1, MPI_DOUBLE, pitch_all, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD);
-    SU2_MPI::Gather(&owner, 1, MPI_UNSIGNED_LONG, owner_all, 1, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD);
+    SU2_MPI::Gather(&plunge, 1, MPI_DOUBLE, plunge_all, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
+    SU2_MPI::Gather(&pitch, 1, MPI_DOUBLE, pitch_all, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm());
+    SU2_MPI::Gather(&owner, 1, MPI_UNSIGNED_LONG, owner_all, 1, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm());
 
     /*--- Set plunge and pitch on the master node ---*/
diff --git a/SU2_CFD/src/interfaces/CInterface.cpp b/SU2_CFD/src/interfaces/CInterface.cpp
index 97eba31f2c11..71f76e7d0726 100644
--- a/SU2_CFD/src/interfaces/CInterface.cpp
+++ b/SU2_CFD/src/interfaces/CInterface.cpp
@@ -94,7 +94,7 @@ void CInterface::BroadcastData(const CInterpolator& interpolator,
    * sums) to perform an Allgatherv of donor indices and variables. ---*/
   vector<int> nAllVertexDonor(size), nAllVarCounts(size), displIdx(size,0), displVar(size);
-  SU2_MPI::Allgather(&nLocalVertexDonor, 1, MPI_INT, nAllVertexDonor.data(), 1, MPI_INT, MPI_COMM_WORLD);
+  SU2_MPI::Allgather(&nLocalVertexDonor, 1, MPI_INT, nAllVertexDonor.data(), 1, MPI_INT, SU2_MPI::GetComm());
 
   for (int i = 0; i < size; ++i) {
     nAllVarCounts[i] = nAllVertexDonor[i] * nVar;
@@ -131,10 +131,10 @@ void CInterface::BroadcastData(const CInterpolator& interpolator,
   su2activematrix donorVar(nGlobalVertexDonor, nVar);
 
   SU2_MPI::Allgatherv(sendDonorIdx.data(), sendDonorIdx.size(), MPI_UNSIGNED_LONG, donorIdx.data(),
-                      nAllVertexDonor.data(), displIdx.data(), MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+                      nAllVertexDonor.data(), displIdx.data(), MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
 
   SU2_MPI::Allgatherv(sendDonorVar.data(), sendDonorVar.size(), MPI_DOUBLE, donorVar.data(),
-                      nAllVarCounts.data(), displVar.data(), MPI_DOUBLE, MPI_COMM_WORLD);
+                      nAllVarCounts.data(), displVar.data(), MPI_DOUBLE, SU2_MPI::GetComm());
 
   /*--- This rank does not need to do more work. ---*/
   if (markTarget < 0) continue;
@@ -242,8 +242,8 @@ void CInterface::PreprocessAverage(CGeometry *donor_geometry, CGeometry *target_
     BuffDonorFlag[iSize] = -1;
   }
 
-  SU2_MPI::Allgather(&Marker_Donor, 1 , MPI_INT, BuffMarkerDonor, 1, MPI_INT, MPI_COMM_WORLD);
-  SU2_MPI::Allgather(&Donor_Flag, 1 , MPI_INT, BuffDonorFlag, 1, MPI_INT, MPI_COMM_WORLD);
+  SU2_MPI::Allgather(&Marker_Donor, 1 , MPI_INT, BuffMarkerDonor, 1, MPI_INT, SU2_MPI::GetComm());
+  SU2_MPI::Allgather(&Donor_Flag, 1 , MPI_INT, BuffDonorFlag, 1, MPI_INT, SU2_MPI::GetComm());
 
   Marker_Donor= -1;
   Donor_Flag= -1;
@@ -468,22 +468,22 @@ void CInterface::AllgatherAverage(CSolver *donor_solution, CSolver *target_solut
   }
 
   SU2_MPI::Allgather(avgDensityDonor, nSpanDonor , MPI_DOUBLE, BuffAvgDensityDonor,
-                     nSpanDonor, MPI_DOUBLE, MPI_COMM_WORLD);
+                     nSpanDonor, MPI_DOUBLE, SU2_MPI::GetComm());
   SU2_MPI::Allgather(avgPressureDonor, nSpanDonor , MPI_DOUBLE, BuffAvgPressureDonor,
-                     nSpanDonor, MPI_DOUBLE, MPI_COMM_WORLD);
+                     nSpanDonor, MPI_DOUBLE, SU2_MPI::GetComm());
   SU2_MPI::Allgather(avgNormalVelDonor, nSpanDonor , MPI_DOUBLE, BuffAvgNormalVelDonor,
-                     nSpanDonor, MPI_DOUBLE, MPI_COMM_WORLD);
+                     nSpanDonor, MPI_DOUBLE, SU2_MPI::GetComm());
   SU2_MPI::Allgather(avgTangVelDonor, nSpanDonor , MPI_DOUBLE, BuffAvgTangVelDonor,
-                     nSpanDonor, MPI_DOUBLE, MPI_COMM_WORLD);
+                     nSpanDonor, MPI_DOUBLE, SU2_MPI::GetComm());
   SU2_MPI::Allgather(avg3DVelDonor, nSpanDonor , MPI_DOUBLE, BuffAvg3DVelDonor,
-                     nSpanDonor, MPI_DOUBLE, MPI_COMM_WORLD);
+                     nSpanDonor, MPI_DOUBLE, SU2_MPI::GetComm());
   SU2_MPI::Allgather(avgNuDonor, nSpanDonor , MPI_DOUBLE, BuffAvgNuDonor,
-                     nSpanDonor, MPI_DOUBLE, MPI_COMM_WORLD);
+                     nSpanDonor, MPI_DOUBLE, SU2_MPI::GetComm());
   SU2_MPI::Allgather(avgKineDonor, nSpanDonor , MPI_DOUBLE, BuffAvgKineDonor,
-                     nSpanDonor, MPI_DOUBLE, MPI_COMM_WORLD);
+                     nSpanDonor, MPI_DOUBLE, SU2_MPI::GetComm());
   SU2_MPI::Allgather(avgOmegaDonor, nSpanDonor , MPI_DOUBLE, BuffAvgOmegaDonor,
-                     nSpanDonor, MPI_DOUBLE, MPI_COMM_WORLD);
-  SU2_MPI::Allgather(&Marker_Donor, 1 , MPI_INT, BuffMarkerDonor, 1, MPI_INT, MPI_COMM_WORLD);
+                     nSpanDonor, MPI_DOUBLE, SU2_MPI::GetComm());
+  SU2_MPI::Allgather(&Marker_Donor, 1 , MPI_INT, BuffMarkerDonor, 1, MPI_INT, SU2_MPI::GetComm());
 
   for (iSpan = 0; iSpan < nSpanDonor; iSpan++){
     avgDensityDonor[iSpan] = -1.0;
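BroadcastData above feeds Allgatherv with two count arrays (vertices and variables) and displacement tables built as partial sums of the counts. A reduced sketch of that layout for a single array (hypothetical helper):

#include <mpi.h>
#include <vector>

std::vector<double> GatherVariableBlocks(const std::vector<double>& local, MPI_Comm comm) {
  int nRanks = 0;
  MPI_Comm_size(comm, &nRanks);

  /*--- Exchange counts, then build displacements by partial sums. ---*/
  int myCount = (int)local.size();
  std::vector<int> counts(nRanks), displ(nRanks, 0);
  MPI_Allgather(&myCount, 1, MPI_INT, counts.data(), 1, MPI_INT, comm);
  for (int i = 1; i < nRanks; ++i) displ[i] = displ[i-1] + counts[i-1];

  std::vector<double> global(displ[nRanks-1] + counts[nRanks-1]);
  MPI_Allgatherv(local.data(), myCount, MPI_DOUBLE, global.data(),
                 counts.data(), displ.data(), MPI_DOUBLE, comm);
  return global;
}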
b/SU2_CFD/src/output/CFlowOutput.cpp @@ -382,19 +382,19 @@ void CFlowOutput::SetAnalyzeSurface(CSolver *solver, CGeometry *geometry, CConfi #ifdef HAVE_MPI - SU2_MPI::Allreduce(Surface_MassFlow_Local, Surface_MassFlow_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Mach_Local, Surface_Mach_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Temperature_Local, Surface_Temperature_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Density_Local, Surface_Density_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Enthalpy_Local, Surface_Enthalpy_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_NormalVelocity_Local, Surface_NormalVelocity_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_StreamVelocity2_Local, Surface_StreamVelocity2_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_TransvVelocity2_Local, Surface_TransvVelocity2_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Pressure_Local, Surface_Pressure_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_TotalTemperature_Local, Surface_TotalTemperature_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_TotalPressure_Local, Surface_TotalPressure_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Area_Local, Surface_Area_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_MassFlow_Abs_Local, Surface_MassFlow_Abs_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(Surface_MassFlow_Local, Surface_MassFlow_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Mach_Local, Surface_Mach_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Temperature_Local, Surface_Temperature_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Density_Local, Surface_Density_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Enthalpy_Local, Surface_Enthalpy_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_NormalVelocity_Local, Surface_NormalVelocity_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_StreamVelocity2_Local, Surface_StreamVelocity2_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_TransvVelocity2_Local, Surface_TransvVelocity2_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Pressure_Local, Surface_Pressure_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_TotalTemperature_Local, Surface_TotalTemperature_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_TotalPressure_Local, Surface_TotalPressure_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Area_Local, Surface_Area_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_MassFlow_Abs_Local, Surface_MassFlow_Abs_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, 
SU2_MPI::GetComm()); #else @@ -831,7 +831,7 @@ void CFlowOutput::Set_CpInverseDesign(CSolver *solver, CGeometry *geometry, CCon if (!(Surface_file.fail())) { nPointLocal = geometry->GetnPoint(); - SU2_MPI::Allreduce(&nPointLocal, &nPointGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nPointLocal, &nPointGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); Point2Vertex = new unsigned long[nPointGlobal][2]; PointInDomain = new bool[nPointGlobal]; @@ -920,7 +920,7 @@ void CFlowOutput::Set_CpInverseDesign(CSolver *solver, CGeometry *geometry, CCon #ifdef HAVE_MPI su2double MyPressDiff = PressDiff; - SU2_MPI::Allreduce(&MyPressDiff, &PressDiff, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyPressDiff, &PressDiff, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #endif } diff --git a/SU2_CFD/src/output/COutput.cpp b/SU2_CFD/src/output/COutput.cpp index 541489766c0c..30b70e83fef6 100644 --- a/SU2_CFD/src/output/COutput.cpp +++ b/SU2_CFD/src/output/COutput.cpp @@ -549,7 +549,7 @@ void COutput::WriteToFile(CConfig *config, CGeometry *geometry, unsigned short f /*--- Only sort if there is at least one processor that has this marker ---*/ int globalMarkerSize = 0, localMarkerSize = marker.size(); - SU2_MPI::Allreduce(&localMarkerSize, &globalMarkerSize, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&localMarkerSize, &globalMarkerSize, 1, MPI_INT, MPI_SUM, SU2_MPI::GetComm()); if (globalMarkerSize > 0){ @@ -921,7 +921,7 @@ bool COutput::Convergence_Monitoring(CConfig *config, unsigned long Iteration) { /*--- Convergence criteria ---*/ sbuf_conv[0] = convergence; - SU2_MPI::Reduce(sbuf_conv, rbuf_conv, 1, MPI_UNSIGNED_SHORT, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(sbuf_conv, rbuf_conv, 1, MPI_UNSIGNED_SHORT, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); /*-- Compute global convergence criteria in the master node --*/ @@ -931,7 +931,7 @@ bool COutput::Convergence_Monitoring(CConfig *config, unsigned long Iteration) { else sbuf_conv[0] = 0; } - SU2_MPI::Bcast(sbuf_conv, 1, MPI_UNSIGNED_SHORT, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Bcast(sbuf_conv, 1, MPI_UNSIGNED_SHORT, MASTER_NODE, SU2_MPI::GetComm()); if (sbuf_conv[0] == 1) { convergence = true; } else { convergence = false; } diff --git a/SU2_CFD/src/output/filewriter/CCSVFileWriter.cpp b/SU2_CFD/src/output/filewriter/CCSVFileWriter.cpp index df37c06faf1c..7ffd2fb96702 100644 --- a/SU2_CFD/src/output/filewriter/CCSVFileWriter.cpp +++ b/SU2_CFD/src/output/filewriter/CCSVFileWriter.cpp @@ -71,11 +71,11 @@ void CCSVFileWriter::Write_Data(){ to the master node with collective calls. ---*/ SU2_MPI::Allreduce(&nLocalVertex_Surface, &MaxLocalVertex_Surface, 1, - MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_MAX, SU2_MPI::GetComm()); SU2_MPI::Gather(&Buffer_Send_nVertex, 1, MPI_UNSIGNED_LONG, Buffer_Recv_nVertex, 1, MPI_UNSIGNED_LONG, - MASTER_NODE, MPI_COMM_WORLD); + MASTER_NODE, SU2_MPI::GetComm()); /*--- Allocate buffers for send/recv of the data and global IDs. ---*/ @@ -113,10 +113,10 @@ void CCSVFileWriter::Write_Data(){ /*--- Collective comms of the solution data and global IDs. 
---*/ SU2_MPI::Gather(bufD_Send, (int)MaxLocalVertex_Surface*fieldNames.size(), MPI_DOUBLE, - bufD_Recv, (int)MaxLocalVertex_Surface*fieldNames.size(), MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + bufD_Recv, (int)MaxLocalVertex_Surface*fieldNames.size(), MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); SU2_MPI::Gather(bufL_Send, (int)MaxLocalVertex_Surface, MPI_UNSIGNED_LONG, - bufL_Recv, (int)MaxLocalVertex_Surface, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD); + bufL_Recv, (int)MaxLocalVertex_Surface, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm()); /*--- The master rank alone writes the surface CSV file. ---*/ diff --git a/SU2_CFD/src/output/filewriter/CFEMDataSorter.cpp b/SU2_CFD/src/output/filewriter/CFEMDataSorter.cpp index ae8425da1eeb..61244dd63964 100644 --- a/SU2_CFD/src/output/filewriter/CFEMDataSorter.cpp +++ b/SU2_CFD/src/output/filewriter/CFEMDataSorter.cpp @@ -62,7 +62,7 @@ CFEMDataSorter::CFEMDataSorter(CConfig *config, CGeometry *geometry, const vecto } SU2_MPI::Allreduce(&nLocalPointsBeforeSort, &nGlobalPointBeforeSort, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); /*--- Create a linear partition --- */ diff --git a/SU2_CFD/src/output/filewriter/CFVMDataSorter.cpp b/SU2_CFD/src/output/filewriter/CFVMDataSorter.cpp index a66663304d2c..d3deb547df44 100644 --- a/SU2_CFD/src/output/filewriter/CFVMDataSorter.cpp +++ b/SU2_CFD/src/output/filewriter/CFVMDataSorter.cpp @@ -241,7 +241,7 @@ void CFVMDataSorter::SortVolumetricConnectivity(CConfig *config, many cells it will receive from each other processor. ---*/ SU2_MPI::Alltoall(&(nElem_Send[1]), 1, MPI_INT, - &(nElem_Cum[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nElem_Cum[1]), 1, MPI_INT, SU2_MPI::GetComm()); /*--- Prepare to send connectivities. First check how many messages we will be sending and receiving. 
Here we also put @@ -377,7 +377,7 @@ void CFVMDataSorter::SortVolumetricConnectivity(CConfig *config, int source = ii; int tag = ii + 1; SU2_MPI::Irecv(&(connRecv[ll]), count, MPI_UNSIGNED_LONG, source, tag, - MPI_COMM_WORLD, &(recv_req[iMessage])); + SU2_MPI::GetComm(), &(recv_req[iMessage])); iMessage++; } } @@ -393,7 +393,7 @@ void CFVMDataSorter::SortVolumetricConnectivity(CConfig *config, int dest = ii; int tag = rank + 1; SU2_MPI::Isend(&(connSend[ll]), count, MPI_UNSIGNED_LONG, dest, tag, - MPI_COMM_WORLD, &(send_req[iMessage])); + SU2_MPI::GetComm(), &(send_req[iMessage])); iMessage++; } } @@ -409,7 +409,7 @@ void CFVMDataSorter::SortVolumetricConnectivity(CConfig *config, int source = ii; int tag = ii + 1; SU2_MPI::Irecv(&(haloRecv[ll]), count, MPI_UNSIGNED_SHORT, source, tag, - MPI_COMM_WORLD, &(recv_req[iMessage+nRecvs])); + SU2_MPI::GetComm(), &(recv_req[iMessage+nRecvs])); iMessage++; } } @@ -425,7 +425,7 @@ void CFVMDataSorter::SortVolumetricConnectivity(CConfig *config, int dest = ii; int tag = rank + 1; SU2_MPI::Isend(&(haloSend[ll]), count, MPI_UNSIGNED_SHORT, dest, tag, - MPI_COMM_WORLD, &(send_req[iMessage+nSends])); + SU2_MPI::GetComm(), &(send_req[iMessage+nSends])); iMessage++; } } diff --git a/SU2_CFD/src/output/filewriter/CParallelDataSorter.cpp b/SU2_CFD/src/output/filewriter/CParallelDataSorter.cpp index b8ba27ce7cf4..eeef9bdd0efa 100644 --- a/SU2_CFD/src/output/filewriter/CParallelDataSorter.cpp +++ b/SU2_CFD/src/output/filewriter/CParallelDataSorter.cpp @@ -142,7 +142,7 @@ void CParallelDataSorter::SortOutputData() { int source = ii; int tag = ii + 1; SU2_MPI::Irecv(&(doubleBuffer[ll]), count, MPI_DOUBLE, source, tag, - MPI_COMM_WORLD, &(recv_req[iMessage])); + SU2_MPI::GetComm(), &(recv_req[iMessage])); iMessage++; } } @@ -158,7 +158,7 @@ void CParallelDataSorter::SortOutputData() { int dest = ii; int tag = rank + 1; SU2_MPI::Isend(&(connSend[ll]), count, MPI_DOUBLE, dest, tag, - MPI_COMM_WORLD, &(send_req[iMessage])); + SU2_MPI::GetComm(), &(send_req[iMessage])); iMessage++; } } @@ -174,7 +174,7 @@ void CParallelDataSorter::SortOutputData() { int source = ii; int tag = ii + 1; SU2_MPI::Irecv(&(idRecv[ll]), count, MPI_UNSIGNED_LONG, source, tag, - MPI_COMM_WORLD, &(recv_req[iMessage+nRecvs])); + SU2_MPI::GetComm(), &(recv_req[iMessage+nRecvs])); iMessage++; } } @@ -190,7 +190,7 @@ void CParallelDataSorter::SortOutputData() { int dest = ii; int tag = rank + 1; SU2_MPI::Isend(&(idSend[ll]), count, MPI_UNSIGNED_LONG, dest, tag, - MPI_COMM_WORLD, &(send_req[iMessage+nSends])); + SU2_MPI::GetComm(), &(send_req[iMessage+nSends])); iMessage++; } } @@ -262,7 +262,7 @@ void CParallelDataSorter::SortOutputData() { /*--- Reduce the total number of points we will write in the output files. ---*/ SU2_MPI::Allreduce(&nPoints, &nPointsGlobal, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); /*--- Free temporary memory from communications ---*/ @@ -298,7 +298,7 @@ void CParallelDataSorter::PrepareSendBuffers(std::vector& globalI many cells it will receive from each other processor. ---*/ SU2_MPI::Alltoall(&(nPoint_Send[1]), 1, MPI_INT, - &(nPoint_Recv[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nPoint_Recv[1]), 1, MPI_INT, SU2_MPI::GetComm()); /*--- Prepare to send coordinates. First check how many messages we will be sending and receiving. Here we also put @@ -414,7 +414,7 @@ void CParallelDataSorter::SetTotalElements(){ /*--- Reduce the total number of cells we will be writing in the output files. 
---*/ - SU2_MPI::Allreduce(nElemPerType.data(), nElemPerTypeGlobal.data(), N_ELEM_TYPES, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(nElemPerType.data(), nElemPerTypeGlobal.data(), N_ELEM_TYPES, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); nElemGlobal = std::accumulate(nElemPerTypeGlobal.begin(), nElemPerTypeGlobal.end(), 0); nElem = std::accumulate(nElemPerType.begin(), nElemPerType.end(), 0); @@ -451,10 +451,10 @@ void CParallelDataSorter::SetTotalElements(){ /*--- Communicate the local counts to all ranks for building offsets. ---*/ SU2_MPI::Alltoall(&(nElem_Send[1]), 1, MPI_INT, - &(nElem_Cum[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nElem_Cum[1]), 1, MPI_INT, SU2_MPI::GetComm()); SU2_MPI::Alltoall(&(nElemConn_Send[1]), 1, MPI_INT, - &(nElemConn_Cum[1]), 1, MPI_INT, MPI_COMM_WORLD); + &(nElemConn_Cum[1]), 1, MPI_INT, SU2_MPI::GetComm()); /*--- Put the counters into cumulative storage format. ---*/ diff --git a/SU2_CFD/src/output/filewriter/CParallelFileWriter.cpp b/SU2_CFD/src/output/filewriter/CParallelFileWriter.cpp index 579aa00a2397..a882011c27bf 100644 --- a/SU2_CFD/src/output/filewriter/CParallelFileWriter.cpp +++ b/SU2_CFD/src/output/filewriter/CParallelFileWriter.cpp @@ -216,14 +216,14 @@ bool CFileWriter::OpenMPIFile(){ to write a fresh output file, so we delete any existing files and create a new one. ---*/ - ierr = MPI_File_open(MPI_COMM_WORLD, fileName.c_str(), + ierr = MPI_File_open(SU2_MPI::GetComm(), fileName.c_str(), MPI_MODE_CREATE|MPI_MODE_EXCL|MPI_MODE_WRONLY, MPI_INFO_NULL, &fhw); if (ierr != MPI_SUCCESS) { MPI_File_close(&fhw); if (rank == 0) MPI_File_delete(fileName.c_str(), MPI_INFO_NULL); - ierr = MPI_File_open(MPI_COMM_WORLD, fileName.c_str(), + ierr = MPI_File_open(SU2_MPI::GetComm(), fileName.c_str(), MPI_MODE_CREATE|MPI_MODE_EXCL|MPI_MODE_WRONLY, MPI_INFO_NULL, &fhw); } @@ -264,7 +264,7 @@ bool CFileWriter::CloseMPIFile(){ su2double my_fileSize = fileSize; SU2_MPI::Allreduce(&my_fileSize, &fileSize, 1, - MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); /*--- Compute and store the bandwidth ---*/ diff --git a/SU2_CFD/src/output/filewriter/CParaviewFileWriter.cpp b/SU2_CFD/src/output/filewriter/CParaviewFileWriter.cpp index f1efcadf975e..a4a73b9a63b8 100644 --- a/SU2_CFD/src/output/filewriter/CParaviewFileWriter.cpp +++ b/SU2_CFD/src/output/filewriter/CParaviewFileWriter.cpp @@ -75,7 +75,7 @@ void CParaviewFileWriter::Write_Data(){ Paraview_File.close(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif /*--- Each processor opens the file. ---*/ @@ -99,7 +99,7 @@ void CParaviewFileWriter::Write_Data(){ Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif } @@ -124,7 +124,7 @@ void CParaviewFileWriter::Write_Data(){ Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif /*--- Write connectivity data. 
---*/ @@ -196,7 +196,7 @@ void CParaviewFileWriter::Write_Data(){ } Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif } @@ -209,7 +209,7 @@ void CParaviewFileWriter::Write_Data(){ Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif for (iProcessor = 0; iProcessor < size; iProcessor++) { @@ -224,7 +224,7 @@ void CParaviewFileWriter::Write_Data(){ } Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif } @@ -236,7 +236,7 @@ void CParaviewFileWriter::Write_Data(){ Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif unsigned short varStart = 2; @@ -263,7 +263,7 @@ void CParaviewFileWriter::Write_Data(){ //skip Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif VarCounter++; } @@ -273,7 +273,7 @@ void CParaviewFileWriter::Write_Data(){ //skip Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif VarCounter++; } @@ -288,7 +288,7 @@ void CParaviewFileWriter::Write_Data(){ Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif /*--- Write surface and volumetric point coordinates. ---*/ @@ -307,7 +307,7 @@ void CParaviewFileWriter::Write_Data(){ Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif } @@ -323,7 +323,7 @@ void CParaviewFileWriter::Write_Data(){ Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif /*--- Write surface and volumetric point coordinates. ---*/ @@ -340,7 +340,7 @@ void CParaviewFileWriter::Write_Data(){ } Paraview_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif } diff --git a/SU2_CFD/src/output/filewriter/CSTLFileWriter.cpp b/SU2_CFD/src/output/filewriter/CSTLFileWriter.cpp index aba6a6bc5765..33281e83190a 100644 --- a/SU2_CFD/src/output/filewriter/CSTLFileWriter.cpp +++ b/SU2_CFD/src/output/filewriter/CSTLFileWriter.cpp @@ -157,7 +157,7 @@ void CSTLFileWriter::ReprocessElementConnectivity(){ for (unsigned long i = 0; i < num_halo_nodes; ++i) ++num_nodes_to_receive[neighbor_partitions[i]]; num_nodes_to_send.resize(size); - SU2_MPI::Alltoall(&num_nodes_to_receive[0], 1, MPI_INT, &num_nodes_to_send[0], 1, MPI_INT, MPI_COMM_WORLD); + SU2_MPI::Alltoall(&num_nodes_to_receive[0], 1, MPI_INT, &num_nodes_to_send[0], 1, MPI_INT, SU2_MPI::GetComm()); /* Now send the global node numbers whose data we need, and receive the same from all other ranks. @@ -182,7 +182,7 @@ void CSTLFileWriter::ReprocessElementConnectivity(){ if (sorted_halo_nodes.empty()) sorted_halo_nodes.resize(1); /* Avoid crash. 
*/ SU2_MPI::Alltoallv(&sorted_halo_nodes[0], &num_nodes_to_receive[0], &nodes_to_receive_displacements[0], MPI_UNSIGNED_LONG, &nodes_to_send[0], &num_nodes_to_send[0], &nodes_to_send_displacements[0], MPI_UNSIGNED_LONG, - MPI_COMM_WORLD); + SU2_MPI::GetComm()); /* Now actually send and receive the data */ data_to_send.resize(max(1, total_num_nodes_to_send * fieldNames.size())); @@ -211,7 +211,7 @@ void CSTLFileWriter::ReprocessElementConnectivity(){ SU2_MPI::Alltoallv(&data_to_send[0], &num_values_to_send[0], &values_to_send_displacements[0], MPI_DOUBLE, &halo_var_data[0], &num_values_to_receive[0], &values_to_receive_displacements[0], MPI_DOUBLE, - MPI_COMM_WORLD); + SU2_MPI::GetComm()); } @@ -244,12 +244,12 @@ void CSTLFileWriter::GatherCoordData(){ to the master node with collective calls. ---*/ SU2_MPI::Allreduce(&nLocalTriaAll, &max_nLocalTriaAll, 1, - MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_MAX, SU2_MPI::GetComm()); SU2_MPI::Gather(&nLocalTriaAll , 1, MPI_UNSIGNED_LONG, buffRecvTriaCount, 1, MPI_UNSIGNED_LONG, - MASTER_NODE, MPI_COMM_WORLD); + MASTER_NODE, SU2_MPI::GetComm()); /*--- Allocate buffer for send/recv of the coordinate data. Only the master rank allocates buffers for the recv. ---*/ buffSendCoords = new su2double[max_nLocalTriaAll*N_POINTS_TRIANGLE*3]; /* Triangle has 3 Points with 3 coords each */ @@ -262,7 +262,7 @@ void CSTLFileWriter::GatherCoordData(){ /*--- Collective comms of the solution data and global IDs. ---*/ SU2_MPI::Gather(buffSendCoords, static_cast(max_nLocalTriaAll*N_POINTS_TRIANGLE*3), MPI_DOUBLE, buffRecvCoords, static_cast(max_nLocalTriaAll*N_POINTS_TRIANGLE*3), MPI_DOUBLE, - MASTER_NODE, MPI_COMM_WORLD); + MASTER_NODE, SU2_MPI::GetComm()); /*--- Free temporary memory. ---*/ delete [] buffSendCoords; diff --git a/SU2_CFD/src/output/filewriter/CSU2FileWriter.cpp b/SU2_CFD/src/output/filewriter/CSU2FileWriter.cpp index 0e0de2555528..e7facb24ebb7 100644 --- a/SU2_CFD/src/output/filewriter/CSU2FileWriter.cpp +++ b/SU2_CFD/src/output/filewriter/CSU2FileWriter.cpp @@ -76,7 +76,7 @@ void CSU2FileWriter::Write_Data(){ /*--- Wait for iProcessor to finish and close the file. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); } /*--- Compute and store the write time. ---*/ diff --git a/SU2_CFD/src/output/filewriter/CSU2MeshFileWriter.cpp b/SU2_CFD/src/output/filewriter/CSU2MeshFileWriter.cpp index 5a0bf38d09e3..0c81dcc33d8f 100644 --- a/SU2_CFD/src/output/filewriter/CSU2MeshFileWriter.cpp +++ b/SU2_CFD/src/output/filewriter/CSU2MeshFileWriter.cpp @@ -114,7 +114,7 @@ void CSU2MeshFileWriter::Write_Data() { } /*--- Communicate offset, implies a barrier. ---*/ - SU2_MPI::Allreduce(&nElem, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nElem, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); } /*--- Write the node coordinates. ---*/ @@ -150,7 +150,7 @@ void CSU2MeshFileWriter::Write_Data() { } /*--- Communicate offset, implies a barrier. 
---*/ - SU2_MPI::Allreduce(&myPoint, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&myPoint, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); } if (rank == MASTER_NODE) { @@ -248,5 +248,5 @@ void CSU2MeshFileWriter::Write_Data() { output_file.close(); } - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); } diff --git a/SU2_CFD/src/output/filewriter/CSurfaceFEMDataSorter.cpp b/SU2_CFD/src/output/filewriter/CSurfaceFEMDataSorter.cpp index 1a938d40883f..d548dd655b42 100644 --- a/SU2_CFD/src/output/filewriter/CSurfaceFEMDataSorter.cpp +++ b/SU2_CFD/src/output/filewriter/CSurfaceFEMDataSorter.cpp @@ -58,7 +58,7 @@ CSurfaceFEMDataSorter::CSurfaceFEMDataSorter(CConfig *config, CGeometry *geometr } SU2_MPI::Allreduce(&nLocalPointsBeforeSort, &nGlobalPointBeforeSort, 1, - MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); /*--- Create the linear partitioner --- */ @@ -154,7 +154,7 @@ void CSurfaceFEMDataSorter::SortOutputData() { vector nDOFRecv(size); SU2_MPI::Alltoall(nDOFSend.data(), 1, MPI_UNSIGNED_LONG, - nDOFRecv.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); + nDOFRecv.data(), 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); /* Determine the number of messages this rank will receive. */ int nRankRecv = 0; @@ -172,7 +172,7 @@ void CSurfaceFEMDataSorter::SortOutputData() { for(int i=0; i num_nodes_to_send(size); - SU2_MPI::Alltoall(&num_nodes_to_receive[0], 1, MPI_INT, &num_nodes_to_send[0], 1, MPI_INT, MPI_COMM_WORLD); + SU2_MPI::Alltoall(&num_nodes_to_receive[0], 1, MPI_INT, &num_nodes_to_send[0], 1, MPI_INT, SU2_MPI::GetComm()); /* Now send the global node numbers whose data we need, and receive the same from all other ranks. @@ -237,7 +237,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ if (sorted_halo_nodes.empty()) sorted_halo_nodes.resize(1); /* Avoid crash. */ SU2_MPI::Alltoallv(&sorted_halo_nodes[0], &num_nodes_to_receive[0], &nodes_to_receive_displacements[0], MPI_UNSIGNED_LONG, &nodes_to_send[0], &num_nodes_to_send[0], &nodes_to_send_displacements[0], MPI_UNSIGNED_LONG, - MPI_COMM_WORLD); + SU2_MPI::GetComm()); /* Now actually send and receive the data */ vector data_to_send(max(1, total_num_nodes_to_send * (int)fieldNames.size())); @@ -260,7 +260,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ } CBaseMPIWrapper::Alltoallv(&data_to_send[0], &num_values_to_send[0], &values_to_send_displacements[0], MPI_DOUBLE, &halo_var_data[0], &num_values_to_receive[0], &values_to_receive_displacements[0], MPI_DOUBLE, - MPI_COMM_WORLD); + SU2_MPI::GetComm()); } else { /* Zone will be gathered to and output by MASTER_NODE */ @@ -290,7 +290,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ vector var_data; unsigned long nPoint = dataSorter->GetnPoints(); vector num_points(size); - SU2_MPI::Gather(&nPoint, 1, MPI_UNSIGNED_LONG, &num_points[0], 1, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Gather(&nPoint, 1, MPI_UNSIGNED_LONG, &num_points[0], 1, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm()); for(int iRank = 0; iRank < size; ++iRank) { int64_t rank_num_points = num_points[iRank]; @@ -308,7 +308,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ } else { /* Receive data from other rank. 
*/ var_data.resize(max((int64_t)1, (int64_t)fieldNames.size() * rank_num_points)); - CBaseMPIWrapper::Recv(&var_data[0], fieldNames.size() * rank_num_points, MPI_DOUBLE, iRank, iRank, MPI_COMM_WORLD, MPI_STATUS_IGNORE); + CBaseMPIWrapper::Recv(&var_data[0], fieldNames.size() * rank_num_points, MPI_DOUBLE, iRank, iRank, SU2_MPI::GetComm(), MPI_STATUS_IGNORE); for (iVar = 0; err == 0 && iVar < fieldNames.size(); iVar++) { err = tecZoneVarWriteDoubleValues(file_handle, zone, iVar + 1, 0, rank_num_points, &var_data[iVar * rank_num_points]); if (err) cout << rank << ": Error outputting Tecplot surface variable values." << endl; @@ -320,7 +320,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ else { /* Send data to MASTER_NODE */ unsigned long nPoint = dataSorter->GetnPoints(); - SU2_MPI::Gather(&nPoint, 1, MPI_UNSIGNED_LONG, NULL, 1, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Gather(&nPoint, 1, MPI_UNSIGNED_LONG, NULL, 1, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm()); vector var_data; size_t var_data_size = fieldNames.size() * dataSorter->GetnPoints(); @@ -330,7 +330,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ var_data.push_back(dataSorter->GetData(iVar,i)); if (var_data.size() > 0) - CBaseMPIWrapper::Send(&var_data[0], static_cast(var_data.size()), MPI_DOUBLE, MASTER_NODE, rank, MPI_COMM_WORLD); + CBaseMPIWrapper::Send(&var_data[0], static_cast(var_data.size()), MPI_DOUBLE, MASTER_NODE, rank, SU2_MPI::GetComm()); } } @@ -431,7 +431,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ vector connectivity_sizes(size); unsigned long unused = 0; - SU2_MPI::Gather(&unused, 1, MPI_UNSIGNED_LONG, &connectivity_sizes[0], 1, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Gather(&unused, 1, MPI_UNSIGNED_LONG, &connectivity_sizes[0], 1, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm()); vector connectivity; for(int iRank = 0; iRank < size; ++iRank) { if (iRank == rank) { @@ -462,7 +462,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ } else { /* Receive node map and write out. */ connectivity.resize(max((unsigned long)1, connectivity_sizes[iRank])); - SU2_MPI::Recv(&connectivity[0], connectivity_sizes[iRank], MPI_UNSIGNED_LONG, iRank, iRank, MPI_COMM_WORLD, MPI_STATUS_IGNORE); + SU2_MPI::Recv(&connectivity[0], connectivity_sizes[iRank], MPI_UNSIGNED_LONG, iRank, iRank, SU2_MPI::GetComm(), MPI_STATUS_IGNORE); err = tecZoneNodeMapWrite64(file_handle, zone, 0, 1, connectivity_sizes[iRank], &connectivity[0]); if (err) cout << rank << ": Error outputting Tecplot node values." 
<< endl; } @@ -473,7 +473,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ unsigned long connectivity_size; connectivity_size = 2 * nParallel_Line + 4 * (nParallel_Tria + nParallel_Quad); - SU2_MPI::Gather(&connectivity_size, 1, MPI_UNSIGNED_LONG, NULL, 1, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Gather(&connectivity_size, 1, MPI_UNSIGNED_LONG, NULL, 1, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm()); vector connectivity; connectivity.reserve(connectivity_size); for (iElem = 0; err == 0 && iElem < nParallel_Line; iElem++) { @@ -496,7 +496,7 @@ void CTecplotBinaryFileWriter::Write_Data(){ } if (connectivity.empty()) connectivity.resize(1); /* Avoid crash */ - SU2_MPI::Send(&connectivity[0], connectivity_size, MPI_UNSIGNED_LONG, MASTER_NODE, rank, MPI_COMM_WORLD); + SU2_MPI::Send(&connectivity[0], connectivity_size, MPI_UNSIGNED_LONG, MASTER_NODE, rank, SU2_MPI::GetComm()); } } #else diff --git a/SU2_CFD/src/output/filewriter/CTecplotFileWriter.cpp b/SU2_CFD/src/output/filewriter/CTecplotFileWriter.cpp index 4236ab560a2a..c0631451dd3e 100644 --- a/SU2_CFD/src/output/filewriter/CTecplotFileWriter.cpp +++ b/SU2_CFD/src/output/filewriter/CTecplotFileWriter.cpp @@ -118,7 +118,7 @@ void CTecplotFileWriter::Write_Data(){ } #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif /*--- Each processor opens the file. ---*/ @@ -142,7 +142,7 @@ void CTecplotFileWriter::Write_Data(){ Tecplot_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif } @@ -204,7 +204,7 @@ void CTecplotFileWriter::Write_Data(){ } Tecplot_File.flush(); #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif } diff --git a/SU2_CFD/src/output/output_structure_legacy.cpp b/SU2_CFD/src/output/output_structure_legacy.cpp index b3aabf724d9a..c10a2bf1d866 100644 --- a/SU2_CFD/src/output/output_structure_legacy.cpp +++ b/SU2_CFD/src/output/output_structure_legacy.cpp @@ -4685,7 +4685,7 @@ void COutputLegacy::SetCp_InverseDesign(CSolver *solver_container, CGeometry *ge nPointLocal = geometry->GetnPoint(); #ifdef HAVE_MPI - SU2_MPI::Allreduce(&nPointLocal, &nPointGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nPointLocal, &nPointGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #else nPointGlobal = nPointLocal; #endif @@ -4811,7 +4811,7 @@ void COutputLegacy::SetCp_InverseDesign(CSolver *solver_container, CGeometry *ge #ifdef HAVE_MPI su2double MyPressDiff = PressDiff; PressDiff = 0.0; - SU2_MPI::Allreduce(&MyPressDiff, &PressDiff, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyPressDiff, &PressDiff, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #endif /*--- Update the total Cp difference coeffient ---*/ @@ -4837,7 +4837,7 @@ void COutputLegacy::SetHeatFlux_InverseDesign(CSolver *solver_container, CGeomet nPointLocal = geometry->GetnPoint(); #ifdef HAVE_MPI - SU2_MPI::Allreduce(&nPointLocal, &nPointGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nPointLocal, &nPointGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #else nPointGlobal = nPointLocal; #endif @@ -4962,7 +4962,7 @@ void COutputLegacy::SetHeatFlux_InverseDesign(CSolver *solver_container, CGeomet #ifdef HAVE_MPI su2double MyHeatFluxDiff = HeatFluxDiff; HeatFluxDiff = 0.0; - SU2_MPI::Allreduce(&MyHeatFluxDiff, &HeatFluxDiff, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyHeatFluxDiff, &HeatFluxDiff, 
1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #endif /*--- Update the total HeatFlux difference coeffient ---*/ @@ -5102,7 +5102,7 @@ void COutputLegacy::SpecialOutput_SonicBoom(CSolver *solver, CGeometry *geometry #else int nProcessor; - SU2_MPI::Comm_size(MPI_COMM_WORLD, &nProcessor); + SU2_MPI::Comm_size(SU2_MPI::GetComm(), &nProcessor); unsigned long nLocalVertex_NearField = 0, MaxLocalVertex_NearField = 0; int iProcessor; @@ -5132,9 +5132,9 @@ void COutputLegacy::SpecialOutput_SonicBoom(CSolver *solver, CGeometry *geometry /*--- Send Near-Field vertex information --*/ - SU2_MPI::Allreduce(&nLocalVertex_NearField, &nVertex_NearField, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&nLocalVertex_NearField, &MaxLocalVertex_NearField, 1, MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_nVertex, 1, MPI_UNSIGNED_LONG, Buffer_Receive_nVertex, 1, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nLocalVertex_NearField, &nVertex_NearField, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&nLocalVertex_NearField, &MaxLocalVertex_NearField, 1, MPI_UNSIGNED_LONG, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_nVertex, 1, MPI_UNSIGNED_LONG, Buffer_Receive_nVertex, 1, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm()); delete [] Buffer_Send_nVertex; su2double *Buffer_Send_Xcoord = new su2double[MaxLocalVertex_NearField]; @@ -5197,12 +5197,12 @@ void COutputLegacy::SpecialOutput_SonicBoom(CSolver *solver, CGeometry *geometry /*--- Send all the information --*/ - SU2_MPI::Gather(Buffer_Send_Xcoord, nBuffer_Xcoord, MPI_DOUBLE, Buffer_Receive_Xcoord, nBuffer_Xcoord, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_Ycoord, nBuffer_Ycoord, MPI_DOUBLE, Buffer_Receive_Ycoord, nBuffer_Ycoord, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_Zcoord, nBuffer_Zcoord, MPI_DOUBLE, Buffer_Receive_Zcoord, nBuffer_Zcoord, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_IdPoint, nBuffer_IdPoint, MPI_UNSIGNED_LONG, Buffer_Receive_IdPoint, nBuffer_IdPoint, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_Pressure, nBuffer_Pressure, MPI_DOUBLE, Buffer_Receive_Pressure, nBuffer_Pressure, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_FaceArea, nBuffer_FaceArea, MPI_DOUBLE, Buffer_Receive_FaceArea, nBuffer_FaceArea, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Gather(Buffer_Send_Xcoord, nBuffer_Xcoord, MPI_DOUBLE, Buffer_Receive_Xcoord, nBuffer_Xcoord, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_Ycoord, nBuffer_Ycoord, MPI_DOUBLE, Buffer_Receive_Ycoord, nBuffer_Ycoord, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_Zcoord, nBuffer_Zcoord, MPI_DOUBLE, Buffer_Receive_Zcoord, nBuffer_Zcoord, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_IdPoint, nBuffer_IdPoint, MPI_UNSIGNED_LONG, Buffer_Receive_IdPoint, nBuffer_IdPoint, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_Pressure, nBuffer_Pressure, MPI_DOUBLE, Buffer_Receive_Pressure, nBuffer_Pressure, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_FaceArea, nBuffer_FaceArea, MPI_DOUBLE, Buffer_Receive_FaceArea, nBuffer_FaceArea, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); delete [] Buffer_Send_Xcoord; delete [] Buffer_Send_Ycoord; delete [] Buffer_Send_Zcoord; @@ -5592,7 +5592,7 @@ void 
COutputLegacy::SpecialOutput_SonicBoom(CSolver *solver, CGeometry *geometry /*--- Send the value of the NearField coefficient to all the processors ---*/ - SU2_MPI::Bcast(&InverseDesign, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Bcast(&InverseDesign, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); /*--- Store the value of the NearField coefficient ---*/ @@ -5692,8 +5692,8 @@ void COutputLegacy::SpecialOutput_Distortion(CSolver *solver, CGeometry *geometr if (rank == MASTER_NODE) Buffer_Recv_nVertex = new unsigned long [nProcessor]; #ifdef HAVE_MPI - SU2_MPI::Allreduce(&nLocalVertex_Surface, &MaxLocalVertex_Surface, 1, MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Gather(&Buffer_Send_nVertex, 1, MPI_UNSIGNED_LONG, Buffer_Recv_nVertex, 1, MPI_UNSIGNED_LONG, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nLocalVertex_Surface, &MaxLocalVertex_Surface, 1, MPI_UNSIGNED_LONG, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Gather(&Buffer_Send_nVertex, 1, MPI_UNSIGNED_LONG, Buffer_Recv_nVertex, 1, MPI_UNSIGNED_LONG, MASTER_NODE, SU2_MPI::GetComm()); #else MaxLocalVertex_Surface = nLocalVertex_Surface; Buffer_Recv_nVertex[MASTER_NODE] = Buffer_Send_nVertex[MASTER_NODE]; @@ -5840,19 +5840,19 @@ void COutputLegacy::SpecialOutput_Distortion(CSolver *solver, CGeometry *geometr #ifdef HAVE_MPI - SU2_MPI::Gather(Buffer_Send_Coord_x, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Coord_x, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_Coord_y, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Coord_y, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - if (nDim == 3) SU2_MPI::Gather(Buffer_Send_Coord_z, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Coord_z, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_PT, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_PT, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_TT, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_TT, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_P, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_P, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_T, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_T, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_Mach, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Mach, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_Vel_x, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Vel_x, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_Vel_y, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Vel_y, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - if (nDim == 3) SU2_MPI::Gather(Buffer_Send_Vel_z, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Vel_z, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_q, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_q, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Gather(Buffer_Send_Area, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Area, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Gather(Buffer_Send_Coord_x, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Coord_x, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + 
SU2_MPI::Gather(Buffer_Send_Coord_y, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Coord_y, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + if (nDim == 3) SU2_MPI::Gather(Buffer_Send_Coord_z, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Coord_z, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_PT, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_PT, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_TT, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_TT, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_P, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_P, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_T, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_T, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_Mach, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Mach, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_Vel_x, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Vel_x, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_Vel_y, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Vel_y, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + if (nDim == 3) SU2_MPI::Gather(Buffer_Send_Vel_z, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Vel_z, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_q, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_q, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Gather(Buffer_Send_Area, MaxLocalVertex_Surface, MPI_DOUBLE, Buffer_Recv_Area, MaxLocalVertex_Surface, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); #else @@ -7568,19 +7568,19 @@ void COutputLegacy::SpecialOutput_AnalyzeSurface(CSolver *solver, CGeometry *geo #ifdef HAVE_MPI if (config->GetComm_Level() == COMM_FULL) { - SU2_MPI::Allreduce(Surface_MassFlow_Local, Surface_MassFlow_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Mach_Local, Surface_Mach_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Temperature_Local, Surface_Temperature_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Density_Local, Surface_Density_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Enthalpy_Local, Surface_Enthalpy_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_NormalVelocity_Local, Surface_NormalVelocity_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_StreamVelocity2_Local, Surface_StreamVelocity2_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_TransvVelocity2_Local, Surface_TransvVelocity2_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_Pressure_Local, Surface_Pressure_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_TotalTemperature_Local, Surface_TotalTemperature_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_TotalPressure_Local, Surface_TotalPressure_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - 
SU2_MPI::Allreduce(Surface_Area_Local, Surface_Area_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Surface_MassFlow_Abs_Local, Surface_MassFlow_Abs_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(Surface_MassFlow_Local, Surface_MassFlow_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Mach_Local, Surface_Mach_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Temperature_Local, Surface_Temperature_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Density_Local, Surface_Density_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Enthalpy_Local, Surface_Enthalpy_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_NormalVelocity_Local, Surface_NormalVelocity_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_StreamVelocity2_Local, Surface_StreamVelocity2_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_TransvVelocity2_Local, Surface_TransvVelocity2_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Pressure_Local, Surface_Pressure_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_TotalTemperature_Local, Surface_TotalTemperature_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_TotalPressure_Local, Surface_TotalPressure_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_Area_Local, Surface_Area_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Surface_MassFlow_Abs_Local, Surface_MassFlow_Abs_Total, nMarker_Analyze, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); } #else diff --git a/SU2_CFD/src/python_wrapper_structure.cpp b/SU2_CFD/src/python_wrapper_structure.cpp index 1e94251fdd02..fde9093bc583 100644 --- a/SU2_CFD/src/python_wrapper_structure.cpp +++ b/SU2_CFD/src/python_wrapper_structure.cpp @@ -35,7 +35,7 @@ void CDriver::PythonInterface_Preprocessing(CConfig **config, CGeometry ****geom int rank = MASTER_NODE; #ifdef HAVE_MPI - MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_rank(SU2_MPI::GetComm(), &rank); #endif /* --- Initialize boundary conditions customization, this is achieve through the Python wrapper --- */ @@ -855,7 +855,7 @@ void CFluidDriver::StaticMeshUpdate() { int rank = MASTER_NODE; #ifdef HAVE_MPI - MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_rank(SU2_MPI::GetComm(), &rank); #endif for(iZone = 0; iZone < nZone; iZone++) { @@ -958,7 +958,7 @@ void CFluidDriver::BoundaryConditionsUpdate(){ unsigned short iZone; #ifdef HAVE_MPI - MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_rank(SU2_MPI::GetComm(), &rank); #endif if(rank == MASTER_NODE) cout << "Updating boundary conditions." 
<< endl; diff --git a/SU2_CFD/src/solvers/CAdjEulerSolver.cpp b/SU2_CFD/src/solvers/CAdjEulerSolver.cpp index 3ffa769de6f6..ea07d1cd5bd8 100644 --- a/SU2_CFD/src/solvers/CAdjEulerSolver.cpp +++ b/SU2_CFD/src/solvers/CAdjEulerSolver.cpp @@ -316,7 +316,7 @@ CAdjEulerSolver::CAdjEulerSolver(CGeometry *geometry, CConfig *config, unsigned #ifdef HAVE_MPI Area_Monitored = 0.0; - SU2_MPI::Allreduce(&myArea_Monitored, &Area_Monitored, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&myArea_Monitored, &Area_Monitored, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #else Area_Monitored = myArea_Monitored; #endif @@ -478,7 +478,7 @@ void CAdjEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geo /*--- Communicate the counts to iDomain with non-blocking sends ---*/ - SU2_MPI::Isend(&nPointTotal_s[iDomain], 1, MPI_UNSIGNED_LONG, iDomain, iDomain, MPI_COMM_WORLD, &req); + SU2_MPI::Isend(&nPointTotal_s[iDomain], 1, MPI_UNSIGNED_LONG, iDomain, iDomain, SU2_MPI::GetComm(), &req); SU2_MPI::Request_free(&req); } else { @@ -504,7 +504,7 @@ void CAdjEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geo /*--- Recv the data by probing for the current sender, jDomain, first and then receiving the values from it. ---*/ - SU2_MPI::Recv(&nPointTotal_r[jDomain], 1, MPI_UNSIGNED_LONG, jDomain, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Recv(&nPointTotal_r[jDomain], 1, MPI_UNSIGNED_LONG, jDomain, rank, SU2_MPI::GetComm(), &status); } } @@ -514,7 +514,7 @@ void CAdjEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geo /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- Initialize the counters for the larger send buffers (by domain) ---*/ @@ -568,7 +568,7 @@ void CAdjEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geo SU2_MPI::Isend(&Buffer_Send_AdjVar[PointTotal_Counter*(nVar+3)], nPointTotal_s[iDomain]*(nVar+3), MPI_DOUBLE, iDomain, - iDomain, MPI_COMM_WORLD, &req); + iDomain, SU2_MPI::GetComm(), &req); SU2_MPI::Request_free(&req); } @@ -612,7 +612,7 @@ void CAdjEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geo /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- The next section begins the recv of all data for the interior points/elements in the mesh. First, create the domain structures for @@ -632,7 +632,7 @@ void CAdjEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geo /*--- Receive the buffers with the coords, global index, and colors ---*/ SU2_MPI::Recv(Buffer_Receive_AdjVar, nPointTotal_r[iDomain]*(nVar+3) , MPI_DOUBLE, - iDomain, rank, MPI_COMM_WORLD, &status); + iDomain, rank, SU2_MPI::GetComm(), &status); /*--- Loop over all of the points that we have recv'd and store the coords, global index vertex and markers ---*/ @@ -664,7 +664,7 @@ void CAdjEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geo /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- Free all of the memory used for communicating points and elements ---*/ @@ -711,7 +711,7 @@ void CAdjEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { nDomain = size; - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- This loop gets the array sizes of points for each rank to send to each other rank. 
---*/ @@ -763,7 +763,7 @@ void CAdjEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Communicate the counts to iDomain with non-blocking sends ---*/ - SU2_MPI::Isend(&nPointTotal_s[iDomain], 1, MPI_UNSIGNED_LONG, iDomain, iDomain, MPI_COMM_WORLD, &req); + SU2_MPI::Isend(&nPointTotal_s[iDomain], 1, MPI_UNSIGNED_LONG, iDomain, iDomain, SU2_MPI::GetComm(), &req); SU2_MPI::Request_free(&req); } else { @@ -789,7 +789,7 @@ void CAdjEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Recv the data by probing for the current sender, jDomain, first and then receiving the values from it. ---*/ - SU2_MPI::Recv(&nPointTotal_r[jDomain], 1, MPI_UNSIGNED_LONG, jDomain, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Recv(&nPointTotal_r[jDomain], 1, MPI_UNSIGNED_LONG, jDomain, rank, SU2_MPI::GetComm(), &status); } } @@ -799,7 +799,7 @@ void CAdjEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- Initialize the counters for the larger send buffers (by domain) ---*/ @@ -852,7 +852,7 @@ void CAdjEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { SU2_MPI::Isend(&Buffer_Send_AdjVar[PointTotal_Counter*(nVar+3)], nPointTotal_s[iDomain]*(nVar+3), MPI_DOUBLE, iDomain, - iDomain, MPI_COMM_WORLD, &req); + iDomain, SU2_MPI::GetComm(), &req); SU2_MPI::Request_free(&req); } @@ -896,7 +896,7 @@ void CAdjEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- The next section begins the recv of all data for the interior points/elements in the mesh. First, create the domain structures for @@ -916,7 +916,7 @@ void CAdjEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Receive the buffers with the coords, global index, and colors ---*/ SU2_MPI::Recv(Buffer_Receive_AdjVar, nPointTotal_r[iDomain]*(nVar+3) , MPI_DOUBLE, - iDomain, rank, MPI_COMM_WORLD, &status); + iDomain, rank, SU2_MPI::GetComm(), &status); /*--- Loop over all of the points that we have recv'd and store the @@ -949,7 +949,7 @@ void CAdjEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Wait for the non-blocking sends to complete. 
---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- Free all of the memory used for communicating points and elements ---*/ @@ -1620,7 +1620,7 @@ void CAdjEulerSolver::Preprocessing(CGeometry *geometry, CSolver **solver_contai if (config->GetComm_Level() == COMM_FULL) { #ifdef HAVE_MPI unsigned long MyErrorCounter = nonPhysicalPoints; nonPhysicalPoints = 0; - SU2_MPI::Allreduce(&MyErrorCounter, &nonPhysicalPoints, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyErrorCounter, &nonPhysicalPoints, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #endif if (iMesh == MESH_0) config->SetNonphysical_Points(nonPhysicalPoints); } @@ -1832,7 +1832,7 @@ void CAdjEulerSolver::Upwind_Residual(CGeometry *geometry, CSolver **solver_cont if (config->GetComm_Level() == COMM_FULL) { #ifdef HAVE_MPI - SU2_MPI::Reduce(&counter_local, &counter_global, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&counter_local, &counter_global, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); #else counter_global = counter_local; #endif @@ -2696,12 +2696,12 @@ void CAdjEulerSolver::Inviscid_Sensitivity(CGeometry *geometry, CSolver **solver su2double MyTotal_Sens_Temp = Total_Sens_Temp; Total_Sens_Temp = 0.0; su2double MyTotal_Sens_BPress = Total_Sens_BPress; Total_Sens_BPress = 0.0; - SU2_MPI::Allreduce(&MyTotal_Sens_Geo, &Total_Sens_Geo, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotal_Sens_Mach, &Total_Sens_Mach, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotal_Sens_AoA, &Total_Sens_AoA, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotal_Sens_Press, &Total_Sens_Press, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotal_Sens_Temp, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotal_Sens_BPress, &Total_Sens_BPress, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyTotal_Sens_Geo, &Total_Sens_Geo, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotal_Sens_Mach, &Total_Sens_Mach, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotal_Sens_AoA, &Total_Sens_AoA, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotal_Sens_Press, &Total_Sens_Press, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotal_Sens_Temp, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotal_Sens_BPress, &Total_Sens_BPress, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #endif @@ -4752,7 +4752,7 @@ void CAdjEulerSolver::LoadRestart(CGeometry **geometry, CSolver ***solver, CConf #ifndef HAVE_MPI rbuf_NotMatching = sbuf_NotMatching; #else - SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, SU2_MPI::GetComm()); #endif if (rbuf_NotMatching != 0) { SU2_MPI::Error(string("The solution file ") + filename + string(" doesn't match with the mesh file!\n") + diff --git a/SU2_CFD/src/solvers/CAdjNSSolver.cpp b/SU2_CFD/src/solvers/CAdjNSSolver.cpp index 980b7b32dde3..439aa053d6f7 100644 --- a/SU2_CFD/src/solvers/CAdjNSSolver.cpp +++ b/SU2_CFD/src/solvers/CAdjNSSolver.cpp @@ -254,7 +254,7 @@ CAdjNSSolver::CAdjNSSolver(CGeometry *geometry, CConfig *config, unsigned short #ifdef HAVE_MPI Area_Monitored = 0.0; - SU2_MPI::Allreduce(&myArea_Monitored, &Area_Monitored, 1, 
MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&myArea_Monitored, &Area_Monitored, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #else Area_Monitored = myArea_Monitored; #endif @@ -395,7 +395,7 @@ void CAdjNSSolver::Preprocessing(CGeometry *geometry, CSolver **solver_container if (config->GetComm_Level() == COMM_FULL) { #ifdef HAVE_MPI unsigned long MyErrorCounter = nonPhysicalPoints; nonPhysicalPoints = 0; - SU2_MPI::Allreduce(&MyErrorCounter, &nonPhysicalPoints, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyErrorCounter, &nonPhysicalPoints, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #endif if (iMesh == MESH_0) config->SetNonphysical_Points(nonPhysicalPoints); } @@ -1155,11 +1155,11 @@ void CAdjNSSolver::Viscous_Sensitivity(CGeometry *geometry, CSolver **solver_con su2double MyTotal_Sens_Press = Total_Sens_Press; Total_Sens_Press = 0.0; su2double MyTotal_Sens_Temp = Total_Sens_Temp; Total_Sens_Temp = 0.0; - SU2_MPI::Allreduce(&MyTotal_Sens_Geo, &Total_Sens_Geo, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotal_Sens_Mach, &Total_Sens_Mach, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotal_Sens_AoA, &Total_Sens_AoA, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotal_Sens_Press, &Total_Sens_Press, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotal_Sens_Temp, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyTotal_Sens_Geo, &Total_Sens_Geo, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotal_Sens_Mach, &Total_Sens_Mach, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotal_Sens_AoA, &Total_Sens_AoA, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotal_Sens_Press, &Total_Sens_Press, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotal_Sens_Temp, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #endif diff --git a/SU2_CFD/src/solvers/CAdjTurbSolver.cpp b/SU2_CFD/src/solvers/CAdjTurbSolver.cpp index 59b5643032d6..01c8a58bf99d 100644 --- a/SU2_CFD/src/solvers/CAdjTurbSolver.cpp +++ b/SU2_CFD/src/solvers/CAdjTurbSolver.cpp @@ -161,7 +161,7 @@ CAdjTurbSolver::CAdjTurbSolver(CGeometry *geometry, CConfig *config, unsigned sh #ifndef HAVE_MPI rbuf_NotMatching = sbuf_NotMatching; #else - SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, SU2_MPI::GetComm()); #endif if (rbuf_NotMatching != 0) { SU2_MPI::Error(string("The solution file ") + filename + string(" doesn't match with the mesh file!\n") + diff --git a/SU2_CFD/src/solvers/CBaselineSolver.cpp b/SU2_CFD/src/solvers/CBaselineSolver.cpp index e669add5f441..7900acc2a9ba 100644 --- a/SU2_CFD/src/solvers/CBaselineSolver.cpp +++ b/SU2_CFD/src/solvers/CBaselineSolver.cpp @@ -157,7 +157,7 @@ void CBaselineSolver::SetOutputVariables(CGeometry *geometry, CConfig *config) { /*--- All ranks open the file using MPI. ---*/ - ierr = MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); + ierr = MPI_File_open(SU2_MPI::GetComm(), fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); /*--- Error check opening the file. ---*/ @@ -175,7 +175,7 @@ void CBaselineSolver::SetOutputVariables(CGeometry *geometry, CConfig *config) { /*--- Broadcast the number of variables to all procs and store more clearly. 
---*/ - SU2_MPI::Bcast(var_buf, nVar_Buf, MPI_INT, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Bcast(var_buf, nVar_Buf, MPI_INT, MASTER_NODE, SU2_MPI::GetComm()); /*--- Check that this is an SU2 binary file. SU2 binary files have the hex representation of "SU2" as the first int in the file. ---*/ @@ -208,7 +208,7 @@ void CBaselineSolver::SetOutputVariables(CGeometry *geometry, CConfig *config) { /*--- Broadcast the string names of the variables. ---*/ SU2_MPI::Bcast(mpi_str_buf, nVar*CGNS_STRING_SIZE, MPI_CHAR, - MASTER_NODE, MPI_COMM_WORLD); + MASTER_NODE, SU2_MPI::GetComm()); fields.push_back("Point_ID"); @@ -283,7 +283,7 @@ void CBaselineSolver::SetOutputVariables(CGeometry *geometry, CConfig *config) { /*--- All ranks open the file using MPI. ---*/ - ierr = MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); + ierr = MPI_File_open(SU2_MPI::GetComm(), fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); /*--- Error check opening the file. ---*/ @@ -298,7 +298,7 @@ void CBaselineSolver::SetOutputVariables(CGeometry *geometry, CConfig *config) { /*--- Broadcast the number of variables to all procs and store clearly. ---*/ - SU2_MPI::Bcast(&magic_number, 1, MPI_INT, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Bcast(&magic_number, 1, MPI_INT, MASTER_NODE, SU2_MPI::GetComm()); /*--- Check that this is an SU2 binary file. SU2 binary files have the hex representation of "SU2" as the first int in the file. ---*/ diff --git a/SU2_CFD/src/solvers/CBaselineSolver_FEM.cpp b/SU2_CFD/src/solvers/CBaselineSolver_FEM.cpp index 63b07e05d521..faf3c855c8fd 100644 --- a/SU2_CFD/src/solvers/CBaselineSolver_FEM.cpp +++ b/SU2_CFD/src/solvers/CBaselineSolver_FEM.cpp @@ -65,7 +65,7 @@ CBaselineSolver_FEM::CBaselineSolver_FEM(CGeometry *geometry, CConfig *config) { /*--- Determine the global number of DOFs. ---*/ #ifdef HAVE_MPI - SU2_MPI::Allreduce(&nDOFsLocOwned, &nDOFsGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nDOFsLocOwned, &nDOFsGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #else nDOFsGlobal = nDOFsLocOwned; #endif @@ -157,7 +157,7 @@ void CBaselineSolver_FEM::SetOutputVariables(CGeometry *geometry, CConfig *confi char fname[100]; strcpy(fname, filename.c_str()); - ierr = MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); + ierr = MPI_File_open(SU2_MPI::GetComm(), fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); /*--- Error check opening the file. ---*/ @@ -174,7 +174,7 @@ void CBaselineSolver_FEM::SetOutputVariables(CGeometry *geometry, CConfig *confi /*--- Broadcast the number of variables to all procs and store more clearly. ---*/ - SU2_MPI::Bcast(var_buf, nVar_Buf, MPI_INT, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Bcast(var_buf, nVar_Buf, MPI_INT, MASTER_NODE, SU2_MPI::GetComm()); /*--- Check that this is an SU2 binary file. SU2 binary files have the hex representation of "SU2" as the first int in the file. ---*/ @@ -244,7 +244,7 @@ void CBaselineSolver_FEM::SetOutputVariables(CGeometry *geometry, CConfig *confi char fname[100]; strcpy(fname, filename.c_str()); - ierr = MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); + ierr = MPI_File_open(SU2_MPI::GetComm(), fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); /*--- Error check opening the file. ---*/ @@ -259,7 +259,7 @@ void CBaselineSolver_FEM::SetOutputVariables(CGeometry *geometry, CConfig *confi /*--- Broadcast the number of variables to all procs and store clearly. 
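The variable-name broadcast above moves all field names in one message by packing each name into a fixed-width slot of a single char buffer. A sketch of that packing, with WIDTH as an illustrative stand-in for CGNS_STRING_SIZE and plain MPI in place of the SU2 wrapper:

#include <mpi.h>
#include <string>
#include <vector>

std::vector<std::string> BcastFieldNames(const std::vector<std::string>& names,
                                         int nVar, MPI_Comm comm) {
  const int WIDTH = 33;  // illustrative slot size (CGNS_STRING_SIZE in SU2)
  std::vector<char> buf(nVar * WIDTH, '\0');
  int rank;
  MPI_Comm_rank(comm, &rank);
  // Only the master knows the names; it fills one zero-padded slot per name.
  if (rank == 0)
    for (int i = 0; i < nVar; ++i)
      names[i].copy(&buf[i * WIDTH], WIDTH - 1);
  MPI_Bcast(buf.data(), nVar * WIDTH, MPI_CHAR, 0, comm);
  // Every rank rebuilds the strings from the slots.
  std::vector<std::string> fields;
  for (int i = 0; i < nVar; ++i) fields.emplace_back(&buf[i * WIDTH]);
  return fields;
}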
---*/ - SU2_MPI::Bcast(&magic_number, 1, MPI_INT, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Bcast(&magic_number, 1, MPI_INT, MASTER_NODE, SU2_MPI::GetComm()); /*--- Check that this is an SU2 binary file. SU2 binary files have the hex representation of "SU2" as the first int in the file. ---*/ @@ -368,7 +368,7 @@ void CBaselineSolver_FEM::LoadRestart(CGeometry **geometry, CSolver ***solver, C #ifdef HAVE_MPI unsigned short sbuf_NotMatching = rbuf_NotMatching; - SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_MAX, SU2_MPI::GetComm()); #endif if (rbuf_NotMatching != 0) diff --git a/SU2_CFD/src/solvers/CDiscAdjFEASolver.cpp b/SU2_CFD/src/solvers/CDiscAdjFEASolver.cpp index 06bbc899e635..a0d9d81596e0 100644 --- a/SU2_CFD/src/solvers/CDiscAdjFEASolver.cpp +++ b/SU2_CFD/src/solvers/CDiscAdjFEASolver.cpp @@ -639,10 +639,10 @@ void CDiscAdjFEASolver::ExtractAdjoint_Variables(CGeometry *geometry, CConfig *c } } - SU2_MPI::Allreduce(Local_Sens_E, Global_Sens_E, nMPROP, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Local_Sens_Nu, Global_Sens_Nu, nMPROP, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Local_Sens_Rho, Global_Sens_Rho, nMPROP, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Local_Sens_Rho_DL, Global_Sens_Rho_DL, nMPROP, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(Local_Sens_E, Global_Sens_E, nMPROP, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Local_Sens_Nu, Global_Sens_Nu, nMPROP, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Local_Sens_Rho, Global_Sens_Rho, nMPROP, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Local_Sens_Rho_DL, Global_Sens_Rho_DL, nMPROP, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); /*--- Extract the adjoint values of the electric field in the case that it is a parameter of the problem. 
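The restart readers above share a header handshake: open the binary file collectively, have the master read the leading integer, broadcast it, and reject the file if it is not the expected marker. A sketch under the assumption that the MAGIC constant below merely stands in for the hex encoding of "SU2" that the real readers check:

#include <mpi.h>

bool HasValidHeader(const char* fname, MPI_Comm comm) {
  const int MAGIC = 0x535532;  // hypothetical stand-in for the "SU2" marker
  MPI_File fhw;
  if (MPI_File_open(comm, fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw)
      != MPI_SUCCESS)
    return false;              // the real code raises SU2_MPI::Error here
  int rank, magic = 0;
  MPI_Comm_rank(comm, &rank);
  if (rank == 0)
    MPI_File_read(fhw, &magic, 1, MPI_INT, MPI_STATUS_IGNORE);
  MPI_File_close(&fhw);
  MPI_Bcast(&magic, 1, MPI_INT, 0, comm);  // all ranks learn the header
  return magic == MAGIC;
}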
---*/ @@ -651,7 +651,7 @@ void CDiscAdjFEASolver::ExtractAdjoint_Variables(CGeometry *geometry, CConfig *c if (local_index) Local_Sens_EField[iVar] = AD::GetDerivative(AD_Idx_EField[iVar]); else Local_Sens_EField[iVar] = SU2_TYPE::GetDerivative(EField[iVar]); } - SU2_MPI::Allreduce(Local_Sens_EField, Global_Sens_EField, nEField, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(Local_Sens_EField, Global_Sens_EField, nEField, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); } if (fea_dv) { @@ -659,7 +659,7 @@ void CDiscAdjFEASolver::ExtractAdjoint_Variables(CGeometry *geometry, CConfig *c if (local_index) Local_Sens_DV[iVar] = AD::GetDerivative(AD_Idx_DV_Val[iVar]); else Local_Sens_DV[iVar] = SU2_TYPE::GetDerivative(DV_Val[iVar]); } - SU2_MPI::Allreduce(Local_Sens_DV, Global_Sens_DV, nDV, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(Local_Sens_DV, Global_Sens_DV, nDV, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); } /*--- Extract the flow traction sensitivities ---*/ diff --git a/SU2_CFD/src/solvers/CDiscAdjSolver.cpp b/SU2_CFD/src/solvers/CDiscAdjSolver.cpp index bae0c0de8fc1..efed6499cd09 100644 --- a/SU2_CFD/src/solvers/CDiscAdjSolver.cpp +++ b/SU2_CFD/src/solvers/CDiscAdjSolver.cpp @@ -483,10 +483,10 @@ void CDiscAdjSolver::ExtractAdjoint_Variables(CGeometry *geometry, CConfig *conf Local_Sens_Temp = SU2_TYPE::GetDerivative(Temperature); Local_Sens_Press = SU2_TYPE::GetDerivative(Pressure); - SU2_MPI::Allreduce(&Local_Sens_Mach, &Total_Sens_Mach, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_Sens_AoA, &Total_Sens_AoA, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_Sens_Temp, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_Sens_Press, &Total_Sens_Press, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Local_Sens_Mach, &Total_Sens_Mach, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_Sens_AoA, &Total_Sens_AoA, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_Sens_Temp, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_Sens_Press, &Total_Sens_Press, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); } if ((config->GetKind_Regime() == COMPRESSIBLE) && (KindDirect_Solver == RUNTIME_FLOW_SYS) && config->GetBoolTurbomachinery()){ @@ -495,8 +495,8 @@ void CDiscAdjSolver::ExtractAdjoint_Variables(CGeometry *geometry, CConfig *conf Local_Sens_BPress = SU2_TYPE::GetDerivative(BPressure); Local_Sens_Temperature = SU2_TYPE::GetDerivative(Temperature); - SU2_MPI::Allreduce(&Local_Sens_BPress, &Total_Sens_BPress, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_Sens_Temperature, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Local_Sens_BPress, &Total_Sens_BPress, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_Sens_Temperature, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); } if ((config->GetKind_Regime() == INCOMPRESSIBLE) && @@ -509,9 +509,9 @@ void CDiscAdjSolver::ExtractAdjoint_Variables(CGeometry *geometry, CConfig *conf Local_Sens_BPress = SU2_TYPE::GetDerivative(BPressure); Local_Sens_Temp = SU2_TYPE::GetDerivative(Temperature); - SU2_MPI::Allreduce(&Local_Sens_ModVel, &Total_Sens_ModVel, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_Sens_BPress, &Total_Sens_BPress, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_Sens_Temp, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, 
MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Local_Sens_ModVel, &Total_Sens_ModVel, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_Sens_BPress, &Total_Sens_BPress, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_Sens_Temp, &Total_Sens_Temp, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); } if ((config->GetKind_Regime() == INCOMPRESSIBLE) && @@ -521,7 +521,7 @@ void CDiscAdjSolver::ExtractAdjoint_Variables(CGeometry *geometry, CConfig *conf su2double Local_Sens_Temp_Rad; Local_Sens_Temp_Rad = SU2_TYPE::GetDerivative(TemperatureRad); - SU2_MPI::Allreduce(&Local_Sens_Temp_Rad, &Total_Sens_Temp_Rad, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Local_Sens_Temp_Rad, &Total_Sens_Temp_Rad, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); /*--- Store it in the Total_Sens_Temp container so it's accessible without the need of a new method ---*/ Total_Sens_Temp = Total_Sens_Temp_Rad; @@ -768,7 +768,7 @@ void CDiscAdjSolver::SetSurface_Sensitivity(CGeometry *geometry, CConfig *config Sens_Geo[iMarker_Monitoring] = 0.0; } - SU2_MPI::Allreduce(MySens_Geo, Sens_Geo, config->GetnMarker_Monitoring(), MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(MySens_Geo, Sens_Geo, config->GetnMarker_Monitoring(), MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); delete [] MySens_Geo; #endif @@ -886,7 +886,7 @@ void CDiscAdjSolver::LoadRestart(CGeometry **geometry, CSolver ***solver, CConfi if (iPoint_Global_Local < nPointDomain) { sbuf_NotMatching = 1; } - SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, SU2_MPI::GetComm()); if (rbuf_NotMatching != 0) { SU2_MPI::Error(string("The solution file ") + filename + string(" doesn't match with the mesh file!\n") + diff --git a/SU2_CFD/src/solvers/CEulerSolver.cpp b/SU2_CFD/src/solvers/CEulerSolver.cpp index 83221d895ccc..02ea8acb36ef 100644 --- a/SU2_CFD/src/solvers/CEulerSolver.cpp +++ b/SU2_CFD/src/solvers/CEulerSolver.cpp @@ -336,7 +336,7 @@ CEulerSolver::CEulerSolver(CGeometry *geometry, CConfig *config, if (config->GetComm_Level() == COMM_FULL) { - SU2_MPI::Reduce(&counter_local, &counter_global, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&counter_local, &counter_global, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); if ((rank == MASTER_NODE) && (counter_global != 0)) cout << "Warning. The original solution contains " << counter_global << " points that are not physical." << endl; @@ -916,7 +916,7 @@ void CEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geomet /*--- Communicate the counts to iDomain with non-blocking sends ---*/ - SU2_MPI::Isend(&nPointTotal_s[iDomain], 1, MPI_UNSIGNED_LONG, iDomain, iDomain, MPI_COMM_WORLD, &req); + SU2_MPI::Isend(&nPointTotal_s[iDomain], 1, MPI_UNSIGNED_LONG, iDomain, iDomain, SU2_MPI::GetComm(), &req); SU2_MPI::Request_free(&req); } else { @@ -942,7 +942,7 @@ void CEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geomet /*--- Recv the data by probing for the current sender, jDomain, first and then receiving the values from it. 
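Set_MPI_ActDisk above opens with a count handshake before any bulk data moves: each rank posts a fire-and-forget Isend of how many points it will ship to every other rank, then blocks on the matching receives. A sketch of that handshake in plain MPI with illustrative names; the trailing Barrier mirrors the synchronization the hunks perform before reusing the buffers:

#include <mpi.h>
#include <vector>

void ExchangeCounts(std::vector<unsigned long>& nSend,
                    std::vector<unsigned long>& nRecv, MPI_Comm comm) {
  int rank, size;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  for (int dest = 0; dest < size; ++dest) {
    if (dest == rank) { nRecv[rank] = nSend[rank]; continue; }
    MPI_Request req;
    // Tag by destination, as in the hunks; free the request immediately
    // and rely on the later synchronization for completion.
    MPI_Isend(&nSend[dest], 1, MPI_UNSIGNED_LONG, dest, dest, comm, &req);
    MPI_Request_free(&req);
  }
  for (int src = 0; src < size; ++src) {
    if (src == rank) continue;
    MPI_Recv(&nRecv[src], 1, MPI_UNSIGNED_LONG, src, rank, comm,
             MPI_STATUS_IGNORE);
  }
  MPI_Barrier(comm);  // mirrors the SU2_MPI::Barrier after the exchange
}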
---*/ - SU2_MPI::Recv(&nPointTotal_r[jDomain], 1, MPI_UNSIGNED_LONG, jDomain, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Recv(&nPointTotal_r[jDomain], 1, MPI_UNSIGNED_LONG, jDomain, rank, SU2_MPI::GetComm(), &status); } } @@ -952,7 +952,7 @@ void CEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geomet /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- Initialize the counters for the larger send buffers (by domain) ---*/ @@ -1014,12 +1014,12 @@ void CEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geomet SU2_MPI::Isend(&Buffer_Send_PrimVar[PointTotal_Counter*(nPrimVar_)], nPointTotal_s[iDomain]*(nPrimVar_), MPI_DOUBLE, iDomain, - iDomain, MPI_COMM_WORLD, &req); + iDomain, SU2_MPI::GetComm(), &req); SU2_MPI::Request_free(&req); SU2_MPI::Isend(&Buffer_Send_Data[PointTotal_Counter*(3)], nPointTotal_s[iDomain]*(3), MPI_LONG, iDomain, - iDomain+nDomain, MPI_COMM_WORLD, &req); + iDomain+nDomain, SU2_MPI::GetComm(), &req); SU2_MPI::Request_free(&req); } @@ -1070,7 +1070,7 @@ void CEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geomet /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- The next section begins the recv of all data for the interior points/elements in the mesh. First, create the domain structures for @@ -1091,10 +1091,10 @@ void CEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geomet /*--- Receive the buffers with the coords, global index, and colors ---*/ SU2_MPI::Recv(Buffer_Receive_PrimVar, nPointTotal_r[iDomain]*(nPrimVar_) , MPI_DOUBLE, - iDomain, rank, MPI_COMM_WORLD, &status); + iDomain, rank, SU2_MPI::GetComm(), &status); SU2_MPI::Recv(Buffer_Receive_Data, nPointTotal_r[iDomain]*(3) , MPI_LONG, - iDomain, rank+nDomain, MPI_COMM_WORLD, &status); + iDomain, rank+nDomain, SU2_MPI::GetComm(), &status); /*--- Loop over all of the points that we have recv'd and store the coords, global index vertex and markers ---*/ @@ -1129,7 +1129,7 @@ void CEulerSolver::Set_MPI_ActDisk(CSolver **solver_container, CGeometry *geomet /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- Free all of the memory used for communicating points and elements ---*/ @@ -1226,7 +1226,7 @@ void CEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Communicate the counts to iDomain with non-blocking sends ---*/ - SU2_MPI::Isend(&nPointTotal_s[iDomain], 1, MPI_UNSIGNED_LONG, iDomain, iDomain, MPI_COMM_WORLD, &req); + SU2_MPI::Isend(&nPointTotal_s[iDomain], 1, MPI_UNSIGNED_LONG, iDomain, iDomain, SU2_MPI::GetComm(), &req); SU2_MPI::Request_free(&req); } else { @@ -1252,7 +1252,7 @@ void CEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Recv the data by probing for the current sender, jDomain, first and then receiving the values from it. ---*/ - SU2_MPI::Recv(&nPointTotal_r[jDomain], 1, MPI_UNSIGNED_LONG, jDomain, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Recv(&nPointTotal_r[jDomain], 1, MPI_UNSIGNED_LONG, jDomain, rank, SU2_MPI::GetComm(), &status); } } @@ -1262,7 +1262,7 @@ void CEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Wait for the non-blocking sends to complete. 
---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- Initialize the counters for the larger send buffers (by domain) ---*/ @@ -1313,7 +1313,7 @@ void CEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { SU2_MPI::Isend(&Buffer_Send_PrimVar[PointTotal_Counter*(nPrimVar+3)], nPointTotal_s[iDomain]*(nPrimVar+3), MPI_DOUBLE, iDomain, - iDomain, MPI_COMM_WORLD, &req); + iDomain, SU2_MPI::GetComm(), &req); SU2_MPI::Request_free(&req); } @@ -1363,7 +1363,7 @@ void CEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- The next section begins the recv of all data for the interior points/elements in the mesh. First, create the domain structures for @@ -1383,7 +1383,7 @@ void CEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Receive the buffers with the coords, global index, and colors ---*/ SU2_MPI::Recv(Buffer_Receive_PrimVar, nPointTotal_r[iDomain]*(nPrimVar+3) , MPI_DOUBLE, - iDomain, rank, MPI_COMM_WORLD, &status); + iDomain, rank, SU2_MPI::GetComm(), &status); /*--- Loop over all of the points that we have recv'd and store the coords, global index vertex and markers ---*/ @@ -1421,7 +1421,7 @@ void CEulerSolver::Set_MPI_Nearfield(CGeometry *geometry, CConfig *config) { /*--- Wait for the non-blocking sends to complete. ---*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); /*--- Free all of the memory used for communicating points and elements ---*/ @@ -2207,7 +2207,7 @@ void CEulerSolver::CommonPreprocessing(CGeometry *geometry, CSolver **solver_con SU2_OMP_MASTER { unsigned long tmp = ErrorCounter; - SU2_MPI::Allreduce(&tmp, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&tmp, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); config->SetNonphysical_Points(ErrorCounter); } SU2_OMP_BARRIER @@ -2512,10 +2512,10 @@ void CEulerSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_container, SU2_OMP_MASTER if (config->GetComm_Level() == COMM_FULL) { su2double rbuf_time; - SU2_MPI::Allreduce(&Min_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Min_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); Min_Delta_Time = rbuf_time; - SU2_MPI::Allreduce(&Max_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Max_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); Max_Delta_Time = rbuf_time; } SU2_OMP_BARRIER @@ -2567,7 +2567,7 @@ void CEulerSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_container, SU2_OMP_MASTER { - SU2_MPI::Allreduce(&Global_Delta_UnstTimeND, &glbDtND, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Global_Delta_UnstTimeND, &glbDtND, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); Global_Delta_UnstTimeND = glbDtND; config->SetDelta_UnstTimeND(Global_Delta_UnstTimeND); @@ -2836,7 +2836,7 @@ void CEulerSolver::Upwind_Residual(CGeometry *geometry, CSolver **solver_contain SU2_OMP_MASTER { counter_local = ErrorCounter; - SU2_MPI::Reduce(&counter_local, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&counter_local, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); config->SetNonphysical_Reconstr(ErrorCounter); } SU2_OMP_BARRIER @@ -4080,32 +4080,32 @@ void 
CEulerSolver::GetPower_Properties(CGeometry *geometry, CConfig *config, uns /*--- All the ranks to compute the total value ---*/ - SU2_MPI::Allreduce(Inlet_MassFlow_Local, Inlet_MassFlow_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_ReverseMassFlow_Local, Inlet_ReverseMassFlow_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_Pressure_Local, Inlet_Pressure_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_Mach_Local, Inlet_Mach_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_MinPressure_Local, Inlet_MinPressure_Total, nMarker_Inlet, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_MaxPressure_Local, Inlet_MaxPressure_Total, nMarker_Inlet, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_TotalPressure_Local, Inlet_TotalPressure_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_Temperature_Local, Inlet_Temperature_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_TotalTemperature_Local, Inlet_TotalTemperature_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_RamDrag_Local, Inlet_RamDrag_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_Force_Local, Inlet_Force_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_Power_Local, Inlet_Power_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_Area_Local, Inlet_Area_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_XCG_Local, Inlet_XCG_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Inlet_YCG_Local, Inlet_YCG_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - if (nDim == 3) SU2_MPI::Allreduce(Inlet_ZCG_Local, Inlet_ZCG_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - - SU2_MPI::Allreduce(Outlet_MassFlow_Local, Outlet_MassFlow_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_Pressure_Local, Outlet_Pressure_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_TotalPressure_Local, Outlet_TotalPressure_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_Temperature_Local, Outlet_Temperature_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_TotalTemperature_Local, Outlet_TotalTemperature_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_GrossThrust_Local, Outlet_GrossThrust_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_Force_Local, Outlet_Force_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_Power_Local, Outlet_Power_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_Area_Local, Outlet_Area_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(Inlet_MassFlow_Local, Inlet_MassFlow_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_ReverseMassFlow_Local, Inlet_ReverseMassFlow_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_Pressure_Local, Inlet_Pressure_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + 
SU2_MPI::Allreduce(Inlet_Mach_Local, Inlet_Mach_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_MinPressure_Local, Inlet_MinPressure_Total, nMarker_Inlet, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_MaxPressure_Local, Inlet_MaxPressure_Total, nMarker_Inlet, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_TotalPressure_Local, Inlet_TotalPressure_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_Temperature_Local, Inlet_Temperature_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_TotalTemperature_Local, Inlet_TotalTemperature_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_RamDrag_Local, Inlet_RamDrag_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_Force_Local, Inlet_Force_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_Power_Local, Inlet_Power_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_Area_Local, Inlet_Area_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_XCG_Local, Inlet_XCG_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Inlet_YCG_Local, Inlet_YCG_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + if (nDim == 3) SU2_MPI::Allreduce(Inlet_ZCG_Local, Inlet_ZCG_Total, nMarker_Inlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + + SU2_MPI::Allreduce(Outlet_MassFlow_Local, Outlet_MassFlow_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_Pressure_Local, Outlet_Pressure_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_TotalPressure_Local, Outlet_TotalPressure_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_Temperature_Local, Outlet_Temperature_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_TotalTemperature_Local, Outlet_TotalTemperature_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_GrossThrust_Local, Outlet_GrossThrust_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_Force_Local, Outlet_Force_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_Power_Local, Outlet_Power_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_Area_Local, Outlet_Area_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); /*--- Compute the value of the average surface temperature and pressure and set the value in the config structure for future use ---*/ @@ -5084,7 +5084,7 @@ void CEulerSolver::SetActDisk_BCThrust(CGeometry *geometry, CSolver **solver_con if (!ActDisk_Info) config->SetInitial_BCThrust(0.0); MyBCThrust = config->GetInitial_BCThrust(); - SU2_MPI::Allreduce(&MyBCThrust, &BCThrust, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyBCThrust, &BCThrust, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); config->SetInitial_BCThrust(BCThrust); } @@ -6977,12 +6977,12 @@ void CEulerSolver::PreprocessBC_Giles(CGeometry *geometry, CConfig *config, CNum cktemp_out2 = complex(0.0,0.0); - SU2_MPI::Allreduce(&MyRe_inf, &Re_inf, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyIm_inf, &Im_inf, 1, MPI_DOUBLE, 
MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyRe_out1, &Re_out1, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyIm_out1, &Im_out1, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyRe_out2, &Re_out2, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyIm_out2, &Im_out2, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyRe_inf, &Re_inf, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyIm_inf, &Im_inf, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyRe_out1, &Re_out1, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyIm_out1, &Im_out1, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyRe_out2, &Re_out2, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyIm_out2, &Im_out2, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); cktemp_inf = complex(Re_inf,Im_inf); cktemp_out1 = complex(Re_out1,Im_out1); @@ -10324,8 +10324,8 @@ void CEulerSolver::PreprocessAverage(CSolver **solver, CGeometry *geometry, CCon su2double MyTotalAreaDensity = TotalAreaDensity; su2double MyTotalAreaPressure = TotalAreaPressure; - SU2_MPI::Allreduce(&MyTotalAreaDensity, &TotalAreaDensity, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyTotalAreaPressure, &TotalAreaPressure, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyTotalAreaDensity, &TotalAreaDensity, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyTotalAreaPressure, &TotalAreaPressure, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); su2double* MyTotalAreaVelocity = new su2double[nDim]; @@ -10333,7 +10333,7 @@ void CEulerSolver::PreprocessAverage(CSolver **solver, CGeometry *geometry, CCon MyTotalAreaVelocity[iDim] = TotalAreaVelocity[iDim]; } - SU2_MPI::Allreduce(MyTotalAreaVelocity, TotalAreaVelocity, nDim, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(MyTotalAreaVelocity, TotalAreaVelocity, nDim, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); delete [] MyTotalAreaVelocity; @@ -10652,7 +10652,7 @@ void CEulerSolver::TurboAverageProcess(CSolver **solver, CGeometry *geometry, CC auto Allreduce = [](su2double x) { su2double tmp = x; x = 0.0; - SU2_MPI::Allreduce(&tmp, &x, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&tmp, &x, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); return x; }; @@ -10677,7 +10677,7 @@ void CEulerSolver::TurboAverageProcess(CSolver **solver, CGeometry *geometry, CC su2double* buffer = new su2double[max(nVar,nDim)]; auto Allreduce_inplace = [buffer](int size, su2double* x) { - SU2_MPI::Allreduce(x, buffer, size, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(x, buffer, size, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); for(int i=0; i numPoints(size); unsigned long num = myPoints.size(); - SU2_MPI::Allgather(&num, 1, MPI_UNSIGNED_LONG, numPoints.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); + SU2_MPI::Allgather(&num, 1, MPI_UNSIGNED_LONG, numPoints.data(), 1, MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); /*--- Global to local map for the halo points of the rank (not covered by the CGeometry map). ---*/ unordered_map Global2Local; @@ -746,13 +746,13 @@ void CFEASolver::Set_VertexEliminationSchedule(CGeometry *geometry, const vector for (int i = 0; i < size; ++i) { /*--- Send our point list. 
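Set_VertexEliminationSchedule above shares every rank's point list with all others: one Allgather publishes the list lengths, then each rank broadcasts its own list in turn. A compact sketch of that round-robin, in plain MPI with illustrative names:

#include <mpi.h>
#include <vector>

void ShareAllLists(const std::vector<unsigned long>& myPoints, MPI_Comm comm) {
  int rank, size;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  // Publish how many entries each rank will broadcast.
  std::vector<unsigned long> numPoints(size);
  unsigned long num = myPoints.size();
  MPI_Allgather(&num, 1, MPI_UNSIGNED_LONG,
                numPoints.data(), 1, MPI_UNSIGNED_LONG, comm);
  // Each rank takes one turn as the broadcast root.
  for (int i = 0; i < size; ++i) {
    std::vector<unsigned long> theirPoints(numPoints[i]);
    if (rank == i) theirPoints = myPoints;
    MPI_Bcast(theirPoints.data(), static_cast<int>(numPoints[i]),
              MPI_UNSIGNED_LONG, i, comm);
    // ... check each received point against the local maps, as above ...
  }
}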
---*/ if (rank == i) { - SU2_MPI::Bcast(myPoints.data(), numPoints[i], MPI_UNSIGNED_LONG, rank, MPI_COMM_WORLD); + SU2_MPI::Bcast(myPoints.data(), numPoints[i], MPI_UNSIGNED_LONG, rank, SU2_MPI::GetComm()); continue; } /*--- Receive point list. ---*/ vector theirPoints(numPoints[i]); - SU2_MPI::Bcast(theirPoints.data(), numPoints[i], MPI_UNSIGNED_LONG, i, MPI_COMM_WORLD); + SU2_MPI::Bcast(theirPoints.data(), numPoints[i], MPI_UNSIGNED_LONG, i, SU2_MPI::GetComm()); for (auto iPointGlobal : theirPoints) { /*--- Check if the rank has the point. ---*/ @@ -1475,7 +1475,7 @@ void CFEASolver::Compute_NodalStress(CGeometry *geometry, CNumerics **numerics, } // end SU2_OMP_PARALLEL su2double tmp = MaxVonMises_Stress; - SU2_MPI::Allreduce(&tmp, &MaxVonMises_Stress, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&tmp, &MaxVonMises_Stress, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); /*--- Set the value of the MaxVonMises_Stress as the CFEA coeffient ---*/ @@ -2860,8 +2860,8 @@ void CFEASolver::ComputeAitken_Coefficient(CGeometry *geometry, CConfig *config, } - SU2_MPI::Allreduce(&sbuf_numAitk, &rbuf_numAitk, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&sbuf_denAitk, &rbuf_denAitk, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&sbuf_numAitk, &rbuf_numAitk, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&sbuf_denAitk, &rbuf_denAitk, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); WAitkDyn = GetWAitken_Dyn(); @@ -3011,7 +3011,7 @@ void CFEASolver::Compute_OFRefGeom(CGeometry *geometry, const CConfig *config){ atomicAdd(obj_fun_local, objective_function); } - SU2_MPI::Allreduce(&objective_function, &Total_OFRefGeom, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&objective_function, &Total_OFRefGeom, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); Total_OFRefGeom *= config->GetRefGeom_Penalty() / geometry->GetGlobal_nPointDomain(); Total_OFRefGeom += PenaltyValue; @@ -3054,7 +3054,7 @@ void CFEASolver::Compute_OFRefNode(CGeometry *geometry, const CConfig *config){ } } - SU2_MPI::Allreduce(dist, dist_reduce, MAXNVAR, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(dist, dist_reduce, MAXNVAR, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); Total_OFRefNode = config->GetRefNode_Penalty() * Norm(int(MAXNVAR),dist_reduce) + PenaltyValue; @@ -3107,11 +3107,11 @@ void CFEASolver::Compute_OFVolFrac(CGeometry *geometry, const CConfig *config) } su2double tmp; - SU2_MPI::Allreduce(&total_volume,&tmp,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + SU2_MPI::Allreduce(&total_volume,&tmp,1,MPI_DOUBLE,MPI_SUM,SU2_MPI::GetComm()); total_volume = tmp; - SU2_MPI::Allreduce(&integral,&tmp,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + SU2_MPI::Allreduce(&integral,&tmp,1,MPI_DOUBLE,MPI_SUM,SU2_MPI::GetComm()); integral = tmp; - SU2_MPI::Allreduce(&discreteness,&tmp,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + SU2_MPI::Allreduce(&discreteness,&tmp,1,MPI_DOUBLE,MPI_SUM,SU2_MPI::GetComm()); discreteness = tmp; Total_OFDiscreteness = discreteness/total_volume; @@ -3167,7 +3167,7 @@ void CFEASolver::Compute_OFCompliance(CGeometry *geometry, const CConfig *config atomicAdd(comp_local, compliance); } - SU2_MPI::Allreduce(&compliance, &Total_OFCompliance, 1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + SU2_MPI::Allreduce(&compliance, &Total_OFCompliance, 1,MPI_DOUBLE,MPI_SUM,SU2_MPI::GetComm()); } @@ -3240,8 +3240,8 @@ void CFEASolver::Stiffness_Penalty(CGeometry *geometry, CNumerics **numerics, CC // Reduce value across processors for parallelization - 
SU2_MPI::Allreduce(&weightedValue, &weightedValue_reduce, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&totalVolume, &totalVolume_reduce, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&weightedValue, &weightedValue_reduce, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&totalVolume, &totalVolume_reduce, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); su2double ratio = 1.0 - weightedValue_reduce/totalVolume_reduce; @@ -3379,7 +3379,7 @@ void CFEASolver::ExtractAdjoint_Variables(CGeometry *geometry, CConfig *config) #ifdef HAVE_MPI if (rank == MASTER_NODE) rec_buf = new float[nElemDomain]; /*--- Need to use this version of Reduce instead of the wrapped one because we use float ---*/ - MPI_Reduce(send_buf,rec_buf,nElemDomain,MPI_FLOAT,MPI_SUM,MASTER_NODE,MPI_COMM_WORLD); + MPI_Reduce(send_buf,rec_buf,nElemDomain,MPI_FLOAT,MPI_SUM,MASTER_NODE,SU2_MPI::GetComm()); #else rec_buf = send_buf; #endif diff --git a/SU2_CFD/src/solvers/CFEM_DG_EulerSolver.cpp b/SU2_CFD/src/solvers/CFEM_DG_EulerSolver.cpp index d923e4fadf89..5ec7465a6cc8 100644 --- a/SU2_CFD/src/solvers/CFEM_DG_EulerSolver.cpp +++ b/SU2_CFD/src/solvers/CFEM_DG_EulerSolver.cpp @@ -390,7 +390,7 @@ CFEM_DG_EulerSolver::CFEM_DG_EulerSolver(CGeometry *geometry, CConfig *config, u /*--- Determine the global number of DOFs. ---*/ #ifdef HAVE_MPI - SU2_MPI::Allreduce(&nDOFsLocOwned, &nDOFsGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&nDOFsLocOwned, &nDOFsGlobal, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #else nDOFsGlobal = nDOFsLocOwned; #endif @@ -1318,7 +1318,7 @@ void CFEM_DG_EulerSolver::DetermineGraphDOFs(const CMeshFEM *FEMGeometry, #ifdef HAVE_MPI SU2_MPI::Allgather(&nDOFsLocOwned, 1, MPI_UNSIGNED_LONG, &nDOFsPerRank[1], 1, - MPI_UNSIGNED_LONG, MPI_COMM_WORLD); + MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); #else nDOFsPerRank[1] = nDOFsLocOwned; #endif @@ -1369,7 +1369,7 @@ void CFEM_DG_EulerSolver::DetermineGraphDOFs(const CMeshFEM *FEMGeometry, /* Send the data using non-blocking sends to avoid deadlock. */ int dest = ranksSend[i]; SU2_MPI::Isend(sendBuf[i].data(), sendBuf[i].size(), MPI_UNSIGNED_LONG, - dest, dest, MPI_COMM_WORLD, &sendReqs[i]); + dest, dest, SU2_MPI::GetComm(), &sendReqs[i]); } /* Create a map of the receive rank to the index in ranksRecv. */ @@ -1383,7 +1383,7 @@ void CFEM_DG_EulerSolver::DetermineGraphDOFs(const CMeshFEM *FEMGeometry, /* Block until a message arrives and determine the source and size of the message. */ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank, SU2_MPI::GetComm(), &status); int source = status.MPI_SOURCE; int sizeMess; @@ -1393,7 +1393,7 @@ void CFEM_DG_EulerSolver::DetermineGraphDOFs(const CMeshFEM *FEMGeometry, and determine the actual index of this rank in ranksRecv. */ vector recvBuf(sizeMess); SU2_MPI::Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, - source, rank, MPI_COMM_WORLD, &status); + source, rank, SU2_MPI::GetComm(), &status); map::const_iterator MI = rankToIndRecvBuf.find(source); source = MI->second; @@ -1415,7 +1415,7 @@ void CFEM_DG_EulerSolver::DetermineGraphDOFs(const CMeshFEM *FEMGeometry, /* Wild cards have been used in the communication, so synchronize the ranks to avoid problems. 
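DetermineGraphDOFs above receives from senders it cannot predict, so it probes first and sizes the buffer from the status before posting the matching receive. A sketch of that idiom in plain MPI; because MPI_ANY_SOURCE is a wild card, the callers follow the whole exchange with a Barrier before the same tags are reused:

#include <mpi.h>
#include <vector>

std::vector<unsigned long> ReceiveFromAnyRank(int tag, MPI_Comm comm,
                                              int& source) {
  MPI_Status status;
  // Block until some message with this tag arrives, whoever sent it.
  MPI_Probe(MPI_ANY_SOURCE, tag, comm, &status);
  source = status.MPI_SOURCE;
  int sizeMess;
  MPI_Get_count(&status, MPI_UNSIGNED_LONG, &sizeMess);
  // Now the buffer can be sized exactly and the matching receive posted.
  std::vector<unsigned long> recvBuf(sizeMess);
  MPI_Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG,
           source, tag, comm, &status);
  return recvBuf;
}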
*/ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #else @@ -1523,7 +1523,7 @@ void CFEM_DG_EulerSolver::DetermineGraphDOFs(const CMeshFEM *FEMGeometry, /* Send the data using non-blocking sends to avoid deadlock. */ int dest = ranksRecv[i]; SU2_MPI::Isend(invSendBuf[i].data(), invSendBuf[i].size(), MPI_UNSIGNED_LONG, - dest, dest+1, MPI_COMM_WORLD, &invSendReqs[i]); + dest, dest+1, SU2_MPI::GetComm(), &invSendReqs[i]); } /* Create a map of the inverse receive (i.e. the original send) rank @@ -1539,7 +1539,7 @@ void CFEM_DG_EulerSolver::DetermineGraphDOFs(const CMeshFEM *FEMGeometry, /* Block until a message arrives and determine the source and size of the message. */ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank+1, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank+1, SU2_MPI::GetComm(), &status); int source = status.MPI_SOURCE; int sizeMess; @@ -1549,7 +1549,7 @@ void CFEM_DG_EulerSolver::DetermineGraphDOFs(const CMeshFEM *FEMGeometry, and determine the actual index of this rank in ranksSend. */ vector recvBuf(sizeMess); SU2_MPI::Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, - source, rank+1, MPI_COMM_WORLD, &status); + source, rank+1, SU2_MPI::GetComm(), &status); map::const_iterator MI = rankToIndSendBuf.find(source); source = MI->second; @@ -1576,7 +1576,7 @@ void CFEM_DG_EulerSolver::DetermineGraphDOFs(const CMeshFEM *FEMGeometry, /* Wild cards have been used in the communication, so synchronize the ranks to avoid problems. */ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #else /*--- Sequential implementation. Just add the data of the halo DOFs @@ -1718,7 +1718,7 @@ void CFEM_DG_EulerSolver::MetaDataJacobianComputation(const CMeshFEM *FEMGeom const int ind = MI->second; SU2_MPI::Isend(sendBuf[ind].data(), sendBuf[ind].size(), MPI_UNSIGNED_LONG, - dest, dest+2, MPI_COMM_WORLD, &sendReqs[i]); + dest, dest+2, SU2_MPI::GetComm(), &sendReqs[i]); } /* Loop over the ranks from which I receive data to be processed. The number @@ -1730,7 +1730,7 @@ void CFEM_DG_EulerSolver::MetaDataJacobianComputation(const CMeshFEM *FEMGeom /* Block until a message arrives and determine the source and size of the message. */ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank+2, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank+2, SU2_MPI::GetComm(), &status); int source = status.MPI_SOURCE; int sizeMess; @@ -1742,7 +1742,7 @@ void CFEM_DG_EulerSolver::MetaDataJacobianComputation(const CMeshFEM *FEMGeom sendReturnBuf[i].resize(sizeMess); SU2_MPI::Recv(recvBuf.data(), sizeMess, MPI_UNSIGNED_LONG, - source, rank+2, MPI_COMM_WORLD, &status); + source, rank+2, SU2_MPI::GetComm(), &status); /* Loop over the data just received and fill the return send buffer with the color of the DOFs. */ @@ -1758,7 +1758,7 @@ void CFEM_DG_EulerSolver::MetaDataJacobianComputation(const CMeshFEM *FEMGeom /* Send the return buffer back to the calling rank. Again use non-blocking sends to avoid deadlock. */ SU2_MPI::Isend(sendReturnBuf[i].data(), sendReturnBuf[i].size(), MPI_INT, - source, source+3, MPI_COMM_WORLD, &sendReturnReqs[i]); + source, source+3, SU2_MPI::GetComm(), &sendReturnReqs[i]); } /* Complete the first round of non-blocking sends. */ @@ -1770,7 +1770,7 @@ void CFEM_DG_EulerSolver::MetaDataJacobianComputation(const CMeshFEM *FEMGeom /* Block until a message arrives and determine the source of the message and its index in the original send buffers. 
*/ SU2_MPI::Status status; - SU2_MPI::Probe(MPI_ANY_SOURCE, rank+3, MPI_COMM_WORLD, &status); + SU2_MPI::Probe(MPI_ANY_SOURCE, rank+3, SU2_MPI::GetComm(), &status); int source = status.MPI_SOURCE; MI = rankCommToInd.find(source); @@ -1780,7 +1780,7 @@ void CFEM_DG_EulerSolver::MetaDataJacobianComputation(const CMeshFEM *FEMGeom a blocking receive. */ vector recvBuf(sendBuf[ind].size()); SU2_MPI::Recv(recvBuf.data(), recvBuf.size(), MPI_INT, - source, rank+3, MPI_COMM_WORLD, &status); + source, rank+3, SU2_MPI::GetComm(), &status); /* Loop over the data just received and add them to the map mapMatrixIndToColor .*/ @@ -1793,7 +1793,7 @@ void CFEM_DG_EulerSolver::MetaDataJacobianComputation(const CMeshFEM *FEMGeom /* Wild cards have been used in the communication, so synchronize the ranks to avoid problems. */ - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif @@ -2433,7 +2433,7 @@ void CFEM_DG_EulerSolver::SetUpTaskList(CConfig *config) { } #ifdef HAVE_MPI - SU2_MPI::Barrier(MPI_COMM_WORLD); + SU2_MPI::Barrier(SU2_MPI::GetComm()); #endif } @@ -2750,7 +2750,7 @@ void CFEM_DG_EulerSolver::Initiate_MPI_Communication(CConfig *config, /* Send the data using non-blocking sends. */ int dest = ranksSendMPI[timeLevel][i]; int tag = dest + timeLevel; - SU2_MPI::Isend(sendBuf, ii, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD, + SU2_MPI::Isend(sendBuf, ii, MPI_DOUBLE, dest, tag, SU2_MPI::GetComm(), &commRequests[timeLevel][indComm]); } @@ -2762,7 +2762,7 @@ void CFEM_DG_EulerSolver::Initiate_MPI_Communication(CConfig *config, int tag = rank + timeLevel; SU2_MPI::Irecv(commRecvBuf[timeLevel][i].data(), commRecvBuf[timeLevel][i].size(), - MPI_DOUBLE, source, tag, MPI_COMM_WORLD, + MPI_DOUBLE, source, tag, SU2_MPI::GetComm(), &commRequests[timeLevel][indComm]); } } @@ -2996,7 +2996,7 @@ void CFEM_DG_EulerSolver::Initiate_MPI_ReverseCommunication(CConfig *config, /* Send the data using non-blocking sends. 
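The communication setup above posts one nonblocking send and one nonblocking receive per neighbor and keeps the requests for later completion. A reduced sketch with a single neighbor, in plain MPI with illustrative names; the Waitall is a simplification of the per-time-level request bookkeeping the solver does:

#include <mpi.h>
#include <vector>

void ExchangeWithNeighbor(std::vector<double>& sendBuf, int dest,
                          std::vector<double>& recvBuf, int source,
                          int tag, MPI_Comm comm) {
  MPI_Request reqs[2];
  MPI_Isend(sendBuf.data(), static_cast<int>(sendBuf.size()), MPI_DOUBLE,
            dest, tag, comm, &reqs[0]);
  MPI_Irecv(recvBuf.data(), static_cast<int>(recvBuf.size()), MPI_DOUBLE,
            source, tag, comm, &reqs[1]);
  // The solver stores these requests and completes them later; here we
  // simply wait for both before touching the buffers.
  MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
}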
*/ int dest = ranksRecvMPI[timeLevel][i]; int tag = dest + timeLevel + 20; - SU2_MPI::Isend(recvBuf, ii, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD, + SU2_MPI::Isend(recvBuf, ii, MPI_DOUBLE, dest, tag, SU2_MPI::GetComm(), &commRequests[timeLevel][indComm]); } @@ -3008,7 +3008,7 @@ void CFEM_DG_EulerSolver::Initiate_MPI_ReverseCommunication(CConfig *config, int tag = rank + timeLevel + 20; SU2_MPI::Irecv(commSendBuf[timeLevel][i].data(), commSendBuf[timeLevel][i].size(), - MPI_DOUBLE, source, tag, MPI_COMM_WORLD, + MPI_DOUBLE, source, tag, SU2_MPI::GetComm(), &commRequests[timeLevel][indComm]); } } @@ -3206,7 +3206,7 @@ void CFEM_DG_EulerSolver::Preprocessing(CGeometry *geometry, CSolver **solver_co if (config->GetComm_Level() == COMM_FULL) { #ifdef HAVE_MPI unsigned long MyErrorCounter = ErrorCounter; - SU2_MPI::Allreduce(&MyErrorCounter, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyErrorCounter, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #endif if (iMesh == MESH_0) config->SetNonphysical_Points(ErrorCounter); } @@ -3726,10 +3726,10 @@ void CFEM_DG_EulerSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_con if ((config->GetComm_Level() == COMM_FULL) || time_stepping) { #ifdef HAVE_MPI su2double rbuf_time = Min_Delta_Time; - SU2_MPI::Allreduce(&rbuf_time, &Min_Delta_Time, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&rbuf_time, &Min_Delta_Time, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); rbuf_time = Max_Delta_Time; - SU2_MPI::Allreduce(&rbuf_time, &Max_Delta_Time, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&rbuf_time, &Max_Delta_Time, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); #endif } @@ -4147,7 +4147,7 @@ void CFEM_DG_EulerSolver::TolerancesADERPredictorStep(void) { #ifdef HAVE_MPI SU2_MPI::Allreduce(URef, TolSolADER.data(), nVar, MPI_DOUBLE, MPI_MAX, - MPI_COMM_WORLD); + SU2_MPI::GetComm()); #else for(unsigned short i=0; iGetComm_Level() == COMM_FULL) { SU2_MPI::Allreduce(locBuf.data(), globBuf.data(), nCommSize, MPI_DOUBLE, - MPI_SUM, MPI_COMM_WORLD); + MPI_SUM, SU2_MPI::GetComm()); } /*--- Copy the data back from globBuf into the required variables. ---*/ @@ -7262,7 +7262,7 @@ void CFEM_DG_EulerSolver::SetResidual_RMS_FEM(CGeometry *geometry, /*--- The local L2 norms must be added to obtain the global value. Also check for divergence. 
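SetResidual_RMS_FEM above turns per-rank sums of squared residuals into one global root-mean-square per variable. A sketch of that reduction in plain MPI, with a simplified NaN test standing in for the real divergence check:

#include <mpi.h>
#include <cmath>
#include <cstdlib>
#include <vector>

void ReduceResidualRMS(std::vector<double>& res2, unsigned long nDOFsGlobal,
                       MPI_Comm comm) {
  // Add the local squared L2 norms across all ranks.
  std::vector<double> global(res2.size());
  MPI_Allreduce(res2.data(), global.data(), static_cast<int>(res2.size()),
                MPI_DOUBLE, MPI_SUM, comm);
  for (std::size_t iVar = 0; iVar < res2.size(); ++iVar) {
    if (std::isnan(global[iVar]))   // simplified divergence check
      MPI_Abort(comm, EXIT_FAILURE);
    res2[iVar] = std::sqrt(global[iVar] / nDOFsGlobal);
  }
}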
---*/ vector rbufRes(nVar); - SU2_MPI::Allreduce(Residual_RMS, rbufRes.data(), nVar, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(Residual_RMS, rbufRes.data(), nVar, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); for(unsigned short iVar=0; iVar rbufPoint(nVar*size); SU2_MPI::Allgather(Point_Max, nVar, MPI_UNSIGNED_LONG, rbufPoint.data(), - nVar, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); + nVar, MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); vector sbufCoor(nDim*nVar); for(unsigned short iVar=0; iVar rbufCoor(nDim*nVar*size); SU2_MPI::Allgather(sbufCoor.data(), nVar*nDim, MPI_DOUBLE, rbufCoor.data(), - nVar*nDim, MPI_DOUBLE, MPI_COMM_WORLD); + nVar*nDim, MPI_DOUBLE, SU2_MPI::GetComm()); for(unsigned short iVar=0; iVarGetComm_Level() == COMM_FULL) { #ifdef HAVE_MPI unsigned long nBadDOFsLoc = nBadDOFs; - SU2_MPI::Reduce(&nBadDOFsLoc, &nBadDOFs, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&nBadDOFsLoc, &nBadDOFs, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); #endif if((rank == MASTER_NODE) && (nBadDOFs != 0)) diff --git a/SU2_CFD/src/solvers/CFEM_DG_NSSolver.cpp b/SU2_CFD/src/solvers/CFEM_DG_NSSolver.cpp index 8672eb91ec5c..ec563e340110 100644 --- a/SU2_CFD/src/solvers/CFEM_DG_NSSolver.cpp +++ b/SU2_CFD/src/solvers/CFEM_DG_NSSolver.cpp @@ -850,7 +850,7 @@ void CFEM_DG_NSSolver::Friction_Forces(const CGeometry* geometry, const CConfig* /* Sum up all the data from all ranks. The result will be available on all ranks. */ if (config->GetComm_Level() == COMM_FULL) { SU2_MPI::Allreduce(locBuf.data(), globBuf.data(), nCommSize, MPI_DOUBLE, - MPI_SUM, MPI_COMM_WORLD); + MPI_SUM, SU2_MPI::GetComm()); } /*--- Copy the data back from globBuf into the required variables. ---*/ @@ -877,7 +877,7 @@ void CFEM_DG_NSSolver::Friction_Forces(const CGeometry* geometry, const CConfig* su2double localMax = AllBound_MaxHeatFlux_Visc; if (config->GetComm_Level() == COMM_FULL) { SU2_MPI::Allreduce(&localMax, &AllBound_MaxHeatFlux_Visc, 1, MPI_DOUBLE, - MPI_MAX, MPI_COMM_WORLD); + MPI_MAX, SU2_MPI::GetComm()); } #endif @@ -1301,10 +1301,10 @@ void CFEM_DG_NSSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_contai if ((config->GetComm_Level() == COMM_FULL) || time_stepping) { #ifdef HAVE_MPI su2double rbuf_time = Min_Delta_Time; - SU2_MPI::Allreduce(&rbuf_time, &Min_Delta_Time, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&rbuf_time, &Min_Delta_Time, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); rbuf_time = Max_Delta_Time; - SU2_MPI::Allreduce(&rbuf_time, &Max_Delta_Time, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&rbuf_time, &Max_Delta_Time, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); #endif } diff --git a/SU2_CFD/src/solvers/CHeatSolver.cpp b/SU2_CFD/src/solvers/CHeatSolver.cpp index 21618f92304c..4130587ec2cc 100644 --- a/SU2_CFD/src/solvers/CHeatSolver.cpp +++ b/SU2_CFD/src/solvers/CHeatSolver.cpp @@ -50,7 +50,7 @@ CHeatSolver::CHeatSolver(CGeometry *geometry, CConfig *config, unsigned short iM dynamic_grid = config->GetDynamic_Grid(); #ifdef HAVE_MPI - MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_rank(SU2_MPI::GetComm(), &rank); #endif /*--- Dimension of the problem --> temperature is the only conservative variable ---*/ @@ -314,7 +314,7 @@ void CHeatSolver::LoadRestart(CGeometry **geometry, CSolver ***solver, CConfig * int rank = MASTER_NODE; #ifdef HAVE_MPI - MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_rank(SU2_MPI::GetComm(), &rank); #endif int counter = 0; @@ -381,7 +381,7 @@ void 
CHeatSolver::LoadRestart(CGeometry **geometry, CSolver ***solver, CConfig * #ifndef HAVE_MPI rbuf_NotMatching = sbuf_NotMatching; #else - SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, SU2_MPI::GetComm()); #endif if (rbuf_NotMatching != 0) { if (rank == MASTER_NODE) { @@ -391,8 +391,8 @@ void CHeatSolver::LoadRestart(CGeometry **geometry, CSolver ***solver, CConfig * #ifndef HAVE_MPI exit(EXIT_FAILURE); #else - MPI_Barrier(MPI_COMM_WORLD); - MPI_Abort(MPI_COMM_WORLD,1); + MPI_Barrier(SU2_MPI::GetComm()); + MPI_Abort(SU2_MPI::GetComm(),1); MPI_Finalize(); #endif } @@ -754,8 +754,8 @@ void CHeatSolver::Set_Heatflux_Areas(CGeometry *geometry, CConfig *config) { } } - SU2_MPI::Allreduce(Local_Surface_Areas, Surface_Areas, config->GetnMarker_HeatFlux(), MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_HeatFlux_Areas_Monitor, &Total_HeatFlux_Areas_Monitor, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(Local_Surface_Areas, Surface_Areas, config->GetnMarker_HeatFlux(), MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_HeatFlux_Areas_Monitor, &Total_HeatFlux_Areas_Monitor, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); Total_HeatFlux_Areas = 0.0; for( iMarker_HeatFlux = 0; iMarker_HeatFlux < config->GetnMarker_HeatFlux(); iMarker_HeatFlux++ ) { @@ -1264,8 +1264,8 @@ void CHeatSolver::Heat_Fluxes(CGeometry *geometry, CSolver **solver_container, C #ifdef HAVE_MPI MyAllBound_HeatFlux = AllBound_HeatFlux; MyAllBound_AverageT = AllBound_AverageT; - SU2_MPI::Allreduce(&MyAllBound_HeatFlux, &AllBound_HeatFlux, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyAllBound_AverageT, &AllBound_AverageT, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyAllBound_HeatFlux, &AllBound_HeatFlux, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyAllBound_AverageT, &AllBound_AverageT, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #endif if (Total_HeatFlux_Areas_Monitor != 0.0) { @@ -1448,13 +1448,13 @@ void CHeatSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_container, #ifdef HAVE_MPI su2double rbuf_time, sbuf_time; sbuf_time = Min_Delta_Time; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Min_Delta_Time = rbuf_time; sbuf_time = Max_Delta_Time; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Max_Delta_Time = rbuf_time; #endif } @@ -1464,8 +1464,8 @@ void CHeatSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_container, #ifdef HAVE_MPI su2double rbuf_time, sbuf_time; sbuf_time = Global_Delta_Time; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm()); + 
SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Global_Delta_Time = rbuf_time; #endif for (iPoint = 0; iPoint < nPointDomain; iPoint++) @@ -1480,8 +1480,8 @@ void CHeatSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_container, #ifdef HAVE_MPI su2double rbuf_time, sbuf_time; sbuf_time = Global_Delta_UnstTimeND; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Global_Delta_UnstTimeND = rbuf_time; #endif config->SetDelta_UnstTimeND(Global_Delta_UnstTimeND); diff --git a/SU2_CFD/src/solvers/CIncEulerSolver.cpp b/SU2_CFD/src/solvers/CIncEulerSolver.cpp index e3df3da16efb..6862311beb5d 100644 --- a/SU2_CFD/src/solvers/CIncEulerSolver.cpp +++ b/SU2_CFD/src/solvers/CIncEulerSolver.cpp @@ -985,7 +985,7 @@ void CIncEulerSolver::Preprocessing(CGeometry *geometry, CSolver **solver_contai if (config->GetComm_Level() == COMM_FULL) { #ifdef HAVE_MPI unsigned long MyErrorCounter = ErrorCounter; ErrorCounter = 0; - SU2_MPI::Allreduce(&MyErrorCounter, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyErrorCounter, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); #endif if (iMesh == MESH_0) config->SetNonphysical_Points(ErrorCounter); } @@ -1151,13 +1151,13 @@ void CIncEulerSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_contain #ifdef HAVE_MPI su2double rbuf_time, sbuf_time; sbuf_time = Min_Delta_Time; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Min_Delta_Time = rbuf_time; sbuf_time = Max_Delta_Time; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Max_Delta_Time = rbuf_time; #endif } @@ -1168,8 +1168,8 @@ void CIncEulerSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_contain #ifdef HAVE_MPI su2double rbuf_time, sbuf_time; sbuf_time = Global_Delta_Time; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Global_Delta_Time = rbuf_time; #endif /*--- If the unsteady CFL is set to zero, it uses the defined @@ -1205,8 +1205,8 @@ void CIncEulerSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_contain #ifdef HAVE_MPI su2double rbuf_time, sbuf_time; sbuf_time = Global_Delta_UnstTimeND; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, 
MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Global_Delta_UnstTimeND = rbuf_time; #endif config->SetDelta_UnstTimeND(Global_Delta_UnstTimeND); @@ -1431,7 +1431,7 @@ void CIncEulerSolver::Upwind_Residual(CGeometry *geometry, CSolver **solver_cont if (config->GetComm_Level() == COMM_FULL) { if (iMesh == MESH_0) { - SU2_MPI::Reduce(&counter_local, &counter_global, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&counter_local, &counter_global, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); config->SetNonphysical_Reconstr(counter_global); } } @@ -2216,7 +2216,7 @@ void CIncEulerSolver::SetBeta_Parameter(CGeometry *geometry, CSolver **solver_co #ifdef HAVE_MPI su2double myMaxVel2 = maxVel2; maxVel2 = 0.0; - SU2_MPI::Allreduce(&myMaxVel2, &maxVel2, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&myMaxVel2, &maxVel2, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); #endif Beta = max(1e-10,maxVel2); @@ -3374,9 +3374,9 @@ void CIncEulerSolver::GetOutlet_Properties(CGeometry *geometry, CConfig *config, #ifdef HAVE_MPI - SU2_MPI::Allreduce(Outlet_MassFlow_Local, Outlet_MassFlow_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_Density_Local, Outlet_Density_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(Outlet_Area_Local, Outlet_Area_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(Outlet_MassFlow_Local, Outlet_MassFlow_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_Density_Local, Outlet_Density_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(Outlet_Area_Local, Outlet_Area_Total, nMarker_Outlet, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); #else diff --git a/SU2_CFD/src/solvers/CIncNSSolver.cpp b/SU2_CFD/src/solvers/CIncNSSolver.cpp index 976da459e9e3..130894855f90 100644 --- a/SU2_CFD/src/solvers/CIncNSSolver.cpp +++ b/SU2_CFD/src/solvers/CIncNSSolver.cpp @@ -147,9 +147,9 @@ void CIncNSSolver::Preprocessing(CGeometry *geometry, CSolver **solver_container su2double MyOmega_Max = Omega_Max; Omega_Max = 0.0; su2double MyStrainMag_Max = StrainMag_Max; StrainMag_Max = 0.0; - SU2_MPI::Allreduce(&MyErrorCounter, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyStrainMag_Max, &StrainMag_Max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyOmega_Max, &Omega_Max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyErrorCounter, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyStrainMag_Max, &StrainMag_Max, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyOmega_Max, &Omega_Max, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); #endif if (iMesh == MESH_0) @@ -366,13 +366,13 @@ void CIncNSSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_container, #ifdef HAVE_MPI su2double rbuf_time, sbuf_time; sbuf_time = Min_Delta_Time; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Min_Delta_Time = rbuf_time; sbuf_time = Max_Delta_Time; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, 
MPI_DOUBLE, MPI_MAX, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Max_Delta_Time = rbuf_time; #endif } @@ -382,8 +382,8 @@ void CIncNSSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_container, #ifdef HAVE_MPI su2double rbuf_time, sbuf_time; sbuf_time = Global_Delta_Time; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Global_Delta_Time = rbuf_time; #endif /*--- If the unsteady CFL is set to zero, it uses the defined @@ -418,8 +418,8 @@ void CIncNSSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_container, #ifdef HAVE_MPI su2double rbuf_time, sbuf_time; sbuf_time = Global_Delta_UnstTimeND; - SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD); - SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&sbuf_time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm()); + SU2_MPI::Bcast(&rbuf_time, 1, MPI_DOUBLE, MASTER_NODE, SU2_MPI::GetComm()); Global_Delta_UnstTimeND = rbuf_time; #endif config->SetDelta_UnstTimeND(Global_Delta_UnstTimeND); diff --git a/SU2_CFD/src/solvers/CMeshSolver.cpp b/SU2_CFD/src/solvers/CMeshSolver.cpp index 5704f6c7bbd1..b2ceac739d48 100644 --- a/SU2_CFD/src/solvers/CMeshSolver.cpp +++ b/SU2_CFD/src/solvers/CMeshSolver.cpp @@ -249,9 +249,9 @@ void CMeshSolver::SetMinMaxVolume(CGeometry *geometry, CConfig *config, bool upd SU2_OMP_MASTER { elCount = ElemCounter; maxVol = MaxVolume; minVol = MinVolume; - SU2_MPI::Allreduce(&elCount, &ElemCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&maxVol, &MaxVolume, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&minVol, &MinVolume, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&elCount, &ElemCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&maxVol, &MaxVolume, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&minVol, &MinVolume, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); } SU2_OMP_BARRIER @@ -379,8 +379,8 @@ void CMeshSolver::SetWallDistance(CGeometry *geometry, CConfig *config) { { MaxDistance_Local = MaxDistance; MinDistance_Local = MinDistance; - SU2_MPI::Allreduce(&MaxDistance_Local, &MaxDistance, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MinDistance_Local, &MinDistance, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MaxDistance_Local, &MaxDistance, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MinDistance_Local, &MinDistance, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); } SU2_OMP_BARRIER } diff --git a/SU2_CFD/src/solvers/CNEMOEulerSolver.cpp b/SU2_CFD/src/solvers/CNEMOEulerSolver.cpp index 86aa426bef54..8e6c4346ef12 100644 --- a/SU2_CFD/src/solvers/CNEMOEulerSolver.cpp +++ b/SU2_CFD/src/solvers/CNEMOEulerSolver.cpp @@ -254,7 +254,7 @@ CNEMOEulerSolver::CNEMOEulerSolver(CGeometry *geometry, CConfig *config, /*--- Warning message about non-physical points ---*/ if (config->GetComm_Level() == COMM_FULL) { - 
SU2_MPI::Reduce(&counter_local, &counter_global, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&counter_local, &counter_global, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); if ((rank == MASTER_NODE) && (counter_global != 0)) cout << "Warning. The original solution contains "<< counter_global << " points that are not physical." << endl; @@ -351,7 +351,7 @@ void CNEMOEulerSolver::CommonPreprocessing(CGeometry *geometry, CSolver **solver if ((iMesh == MESH_0) && (config->GetComm_Level() == COMM_FULL)) { unsigned long tmp = ErrorCounter; - SU2_MPI::Allreduce(&tmp, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&tmp, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); config->SetNonphysical_Points(ErrorCounter); } @@ -614,10 +614,10 @@ void CNEMOEulerSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_contai SU2_OMP_MASTER if (config->GetComm_Level() == COMM_FULL) { su2double rbuf_time; - SU2_MPI::Allreduce(&Min_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Min_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); Min_Delta_Time = rbuf_time; - SU2_MPI::Allreduce(&Max_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Max_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); Max_Delta_Time = rbuf_time; } SU2_OMP_BARRIER @@ -666,7 +666,7 @@ void CNEMOEulerSolver::SetTime_Step(CGeometry *geometry, CSolver **solver_contai SU2_OMP_MASTER { - SU2_MPI::Allreduce(&Global_Delta_UnstTimeND, &glbDtND, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Global_Delta_UnstTimeND, &glbDtND, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); Global_Delta_UnstTimeND = glbDtND; config->SetDelta_UnstTimeND(Global_Delta_UnstTimeND); @@ -991,7 +991,7 @@ void CNEMOEulerSolver::Upwind_Residual(CGeometry *geometry, CSolver **solver_con SU2_OMP_MASTER { counter_local = ErrorCounter; - SU2_MPI::Reduce(&counter_local, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Reduce(&counter_local, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm()); config->SetNonphysical_Reconstr(ErrorCounter); } SU2_OMP_BARRIER @@ -3087,7 +3087,7 @@ void CNEMOEulerSolver::LoadRestart(CGeometry **geometry, CSolver ***solver, CCon #ifndef HAVE_MPI rbuf_NotMatching = sbuf_NotMatching; #else - SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&sbuf_NotMatching, &rbuf_NotMatching, 1, MPI_UNSIGNED_SHORT, MPI_SUM, SU2_MPI::GetComm()); #endif if (rbuf_NotMatching != 0) { SU2_MPI::Error(string("The solution file ") + restart_filename + string(" doesn't match with the mesh file!\n") + diff --git a/SU2_CFD/src/solvers/CNEMONSSolver.cpp b/SU2_CFD/src/solvers/CNEMONSSolver.cpp index 1f72a622ada8..d0f5b8996dc2 100644 --- a/SU2_CFD/src/solvers/CNEMONSSolver.cpp +++ b/SU2_CFD/src/solvers/CNEMONSSolver.cpp @@ -136,8 +136,8 @@ void CNEMONSSolver::Preprocessing(CGeometry *geometry, CSolver **solver_containe su2double MyOmega_Max = Omega_Max; //su2double MyStrainMag_Max = StrainMag_Max; - //SU2_MPI::Allreduce(&MyStrainMag_Max, &StrainMag_Max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyOmega_Max, &Omega_Max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + //SU2_MPI::Allreduce(&MyStrainMag_Max, &StrainMag_Max, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyOmega_Max, 
&Omega_Max, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); } } diff --git a/SU2_CFD/src/solvers/CNSSolver.cpp b/SU2_CFD/src/solvers/CNSSolver.cpp index 9ea15c36e10a..4bd7ba28d6e0 100644 --- a/SU2_CFD/src/solvers/CNSSolver.cpp +++ b/SU2_CFD/src/solvers/CNSSolver.cpp @@ -181,8 +181,8 @@ void CNSSolver::Preprocessing(CGeometry *geometry, CSolver **solver_container, C su2double MyOmega_Max = Omega_Max; su2double MyStrainMag_Max = StrainMag_Max; - SU2_MPI::Allreduce(&MyStrainMag_Max, &StrainMag_Max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyOmega_Max, &Omega_Max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyStrainMag_Max, &StrainMag_Max, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyOmega_Max, &Omega_Max, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); } SU2_OMP_BARRIER } @@ -391,7 +391,7 @@ void CNSSolver::Buffet_Monitoring(const CGeometry *geometry, const CConfig *conf /*--- Add buffet metric information using all the nodes ---*/ su2double MyTotal_Buffet_Metric = Total_Buffet_Metric; - SU2_MPI::Allreduce(&MyTotal_Buffet_Metric, &Total_Buffet_Metric, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyTotal_Buffet_Metric, &Total_Buffet_Metric, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); /*--- Add the buffet metric on the surfaces using all the nodes ---*/ @@ -401,7 +401,7 @@ void CNSSolver::Buffet_Monitoring(const CGeometry *geometry, const CConfig *conf MySurface_Buffet_Metric[iMarker_Monitoring] = Surface_Buffet_Metric[iMarker_Monitoring]; } - SU2_MPI::Allreduce(MySurface_Buffet_Metric, Surface_Buffet_Metric, config->GetnMarker_Monitoring(), MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(MySurface_Buffet_Metric, Surface_Buffet_Metric, config->GetnMarker_Monitoring(), MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); delete [] MySurface_Buffet_Metric; diff --git a/SU2_CFD/src/solvers/CRadP1Solver.cpp b/SU2_CFD/src/solvers/CRadP1Solver.cpp index 3d4533d3a210..76bcf61681fc 100644 --- a/SU2_CFD/src/solvers/CRadP1Solver.cpp +++ b/SU2_CFD/src/solvers/CRadP1Solver.cpp @@ -662,10 +662,10 @@ void CRadP1Solver::SetTime_Step(CGeometry *geometry, CSolver **solver_container, su2double sbuf_time; sbuf_time = Min_Delta_Time; - SU2_MPI::Allreduce(&sbuf_time, &Min_Delta_Time, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&sbuf_time, &Min_Delta_Time, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); sbuf_time = Max_Delta_Time; - SU2_MPI::Allreduce(&sbuf_time, &Max_Delta_Time, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&sbuf_time, &Max_Delta_Time, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); } } diff --git a/SU2_CFD/src/solvers/CSolver.cpp b/SU2_CFD/src/solvers/CSolver.cpp index bb318fd4a0fc..d07c9476ca43 100644 --- a/SU2_CFD/src/solvers/CSolver.cpp +++ b/SU2_CFD/src/solvers/CSolver.cpp @@ -2235,9 +2235,9 @@ void CSolver::AdaptCFLNumber(CGeometry **geometry, SU2_OMP_MASTER { /* MPI reduction. 
*/ myCFLMin = Min_CFL_Local; myCFLMax = Max_CFL_Local; myCFLSum = Avg_CFL_Local; - SU2_MPI::Allreduce(&myCFLMin, &Min_CFL_Local, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&myCFLMax, &Max_CFL_Local, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&myCFLSum, &Avg_CFL_Local, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&myCFLMin, &Min_CFL_Local, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&myCFLMax, &Max_CFL_Local, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&myCFLSum, &Avg_CFL_Local, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); Avg_CFL_Local /= su2double(geometry[iMesh]->GetGlobal_nPointDomain()); } SU2_OMP_BARRIER @@ -2283,8 +2283,8 @@ void CSolver::SetResidual_RMS(CGeometry *geometry, CConfig *config) { if (config->GetComm_Level() == COMM_FULL) { unsigned long Local_nPointDomain = geometry->GetnPointDomain(); - SU2_MPI::Allreduce(sbuf_residual, rbuf_residual, nVar, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_nPointDomain, &Global_nPointDomain, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(sbuf_residual, rbuf_residual, nVar, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_nPointDomain, &Global_nPointDomain, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); } else { @@ -2329,9 +2329,9 @@ void CSolver::SetResidual_RMS(CGeometry *geometry, CConfig *config) { sbuf_coord[iVar*nDim+iDim] = Coord[iDim]; } - SU2_MPI::Allgather(sbuf_residual, nVar, MPI_DOUBLE, rbuf_residual, nVar, MPI_DOUBLE, MPI_COMM_WORLD); - SU2_MPI::Allgather(sbuf_point, nVar, MPI_UNSIGNED_LONG, rbuf_point, nVar, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); - SU2_MPI::Allgather(sbuf_coord, nVar*nDim, MPI_DOUBLE, rbuf_coord, nVar*nDim, MPI_DOUBLE, MPI_COMM_WORLD); + SU2_MPI::Allgather(sbuf_residual, nVar, MPI_DOUBLE, rbuf_residual, nVar, MPI_DOUBLE, SU2_MPI::GetComm()); + SU2_MPI::Allgather(sbuf_point, nVar, MPI_UNSIGNED_LONG, rbuf_point, nVar, MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); + SU2_MPI::Allgather(sbuf_coord, nVar*nDim, MPI_DOUBLE, rbuf_coord, nVar*nDim, MPI_DOUBLE, SU2_MPI::GetComm()); for (iVar = 0; iVar < nVar; iVar++) { for (iProcessor = 0; iProcessor < nProcessor; iProcessor++) { @@ -2386,8 +2386,8 @@ void CSolver::SetResidual_BGS(CGeometry *geometry, CConfig *config) { Local_nPointDomain = geometry->GetnPointDomain(); - SU2_MPI::Allreduce(sbuf_residual, rbuf_residual, nVar, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&Local_nPointDomain, &Global_nPointDomain, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(sbuf_residual, rbuf_residual, nVar, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&Local_nPointDomain, &Global_nPointDomain, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); for (iVar = 0; iVar < nVar; iVar++) { @@ -2422,9 +2422,9 @@ void CSolver::SetResidual_BGS(CGeometry *geometry, CConfig *config) { sbuf_coord[iVar*nDim+iDim] = Coord[iDim]; } - SU2_MPI::Allgather(sbuf_residual, nVar, MPI_DOUBLE, rbuf_residual, nVar, MPI_DOUBLE, MPI_COMM_WORLD); - SU2_MPI::Allgather(sbuf_point, nVar, MPI_UNSIGNED_LONG, rbuf_point, nVar, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); - SU2_MPI::Allgather(sbuf_coord, nVar*nDim, MPI_DOUBLE, rbuf_coord, nVar*nDim, MPI_DOUBLE, MPI_COMM_WORLD); + SU2_MPI::Allgather(sbuf_residual, nVar, MPI_DOUBLE, rbuf_residual, nVar, MPI_DOUBLE, SU2_MPI::GetComm()); + SU2_MPI::Allgather(sbuf_point, nVar, MPI_UNSIGNED_LONG, rbuf_point, nVar, MPI_UNSIGNED_LONG, SU2_MPI::GetComm()); + 
SU2_MPI::Allgather(sbuf_coord, nVar*nDim, MPI_DOUBLE, rbuf_coord, nVar*nDim, MPI_DOUBLE, SU2_MPI::GetComm()); for (iVar = 0; iVar < nVar; iVar++) { for (iProcessor = 0; iProcessor < nProcessor; iProcessor++) { @@ -3150,7 +3150,7 @@ void CSolver::Read_SU2_Restart_ASCII(CGeometry *geometry, const CConfig *config, /*--- All ranks open the file using MPI. ---*/ - ierr = MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); + ierr = MPI_File_open(SU2_MPI::GetComm(), fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); /*--- Error check opening the file. ---*/ @@ -3166,7 +3166,7 @@ void CSolver::Read_SU2_Restart_ASCII(CGeometry *geometry, const CConfig *config, /*--- Broadcast the number of variables to all procs and store clearly. ---*/ - SU2_MPI::Bcast(&magic_number, 1, MPI_INT, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Bcast(&magic_number, 1, MPI_INT, MASTER_NODE, SU2_MPI::GetComm()); /*--- Check that this is an SU2 binary file. SU2 binary files have the hex representation of "SU2" as the first int in the file. ---*/ @@ -3335,7 +3335,7 @@ void CSolver::Read_SU2_Restart_Binary(CGeometry *geometry, const CConfig *config /*--- All ranks open the file using MPI. ---*/ - ierr = MPI_File_open(MPI_COMM_WORLD, fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); + ierr = MPI_File_open(SU2_MPI::GetComm(), fname, MPI_MODE_RDONLY, MPI_INFO_NULL, &fhw); /*--- Error check opening the file. ---*/ @@ -3352,7 +3352,7 @@ void CSolver::Read_SU2_Restart_Binary(CGeometry *geometry, const CConfig *config /*--- Broadcast the number of variables to all procs and store clearly. ---*/ - SU2_MPI::Bcast(Restart_Vars, nRestart_Vars, MPI_INT, MASTER_NODE, MPI_COMM_WORLD); + SU2_MPI::Bcast(Restart_Vars, nRestart_Vars, MPI_INT, MASTER_NODE, SU2_MPI::GetComm()); /*--- Check that this is an SU2 binary file. SU2 binary files have the hex representation of "SU2" as the first int in the file. ---*/ @@ -3382,7 +3382,7 @@ void CSolver::Read_SU2_Restart_Binary(CGeometry *geometry, const CConfig *config /*--- Broadcast the string names of the variables. ---*/ SU2_MPI::Bcast(mpi_str_buf, nFields*CGNS_STRING_SIZE, MPI_CHAR, - MASTER_NODE, MPI_COMM_WORLD); + MASTER_NODE, SU2_MPI::GetComm()); /*--- Now parse the string names and load into the config class in case we need them for writing visualization files (SU2_SOL). 
---*/ @@ -3948,7 +3948,7 @@ void CSolver::LoadInletProfile(CGeometry **geometry, } // end iMarker loop - SU2_MPI::Allreduce(&local_failure, &global_failure, 1, MPI_UNSIGNED_SHORT, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&local_failure, &global_failure, 1, MPI_UNSIGNED_SHORT, MPI_SUM, SU2_MPI::GetComm()); if (global_failure > 0) { SU2_MPI::Error("Prescribed inlet data does not match markers within tolerance.", CURRENT_FUNCTION); From 3462ddacbaf5259f454c25f68fc9a6724a85ae2a Mon Sep 17 00:00:00 2001 From: Alessandro Gastaldi Date: Mon, 1 Feb 2021 14:53:21 +0100 Subject: [PATCH 2/2] Remove dummy MPI_COMM_WORLD altogether --- Common/include/parallelization/mpi_structure.cpp | 8 ++++++++ Common/include/parallelization/mpi_structure.hpp | 1 - SU2_CFD/include/solvers/CFVMFlowSolverBase.hpp | 10 +++++----- SU2_CFD/src/SU2_CFD.cpp | 2 +- SU2_CFD/src/solvers/CIncEulerSolver.cpp | 4 ++-- SU2_DEF/src/SU2_DEF.cpp | 2 +- SU2_DOT/src/SU2_DOT.cpp | 6 +++--- SU2_GEO/src/SU2_GEO.cpp | 2 +- SU2_SOL/src/SU2_SOL.cpp | 2 +- UnitTests/test_driver.cpp | 2 +- 10 files changed, 23 insertions(+), 16 deletions(-) diff --git a/Common/include/parallelization/mpi_structure.cpp b/Common/include/parallelization/mpi_structure.cpp index c707cef4877e..962426d1d4d7 100644 --- a/Common/include/parallelization/mpi_structure.cpp +++ b/Common/include/parallelization/mpi_structure.cpp @@ -27,9 +27,17 @@ #include "mpi_structure.hpp" + +/* Initialise the MPI Communicator Rank and Size */ int CBaseMPIWrapper::Rank = 0; int CBaseMPIWrapper::Size = 1; + +/* Set the default MPI Communicator */ +#ifdef HAVE_MPI CBaseMPIWrapper::Comm CBaseMPIWrapper::currentComm = MPI_COMM_WORLD; +#else +CBaseMPIWrapper::Comm CBaseMPIWrapper::currentComm = 0; // dummy value +#endif #ifdef HAVE_MPI int CBaseMPIWrapper::MinRankError; diff --git a/Common/include/parallelization/mpi_structure.hpp b/Common/include/parallelization/mpi_structure.hpp index b4640bfbcd93..f538db18f6f6 100644 --- a/Common/include/parallelization/mpi_structure.hpp +++ b/Common/include/parallelization/mpi_structure.hpp @@ -467,7 +467,6 @@ class CMediMPIWrapper : public CBaseMPIWrapper { #else // HAVE_MPI -#define MPI_COMM_WORLD 0 #define MPI_UNSIGNED_LONG 1 #define MPI_LONG 2 #define MPI_UNSIGNED_SHORT 3 diff --git a/SU2_CFD/include/solvers/CFVMFlowSolverBase.hpp b/SU2_CFD/include/solvers/CFVMFlowSolverBase.hpp index 36adc6c7c366..131ade3eda69 100644 --- a/SU2_CFD/include/solvers/CFVMFlowSolverBase.hpp +++ b/SU2_CFD/include/solvers/CFVMFlowSolverBase.hpp @@ -458,10 +458,10 @@ class CFVMFlowSolverBase : public CSolver { SU2_OMP_MASTER if (config->GetComm_Level() == COMM_FULL) { su2double rbuf_time; - SU2_MPI::Allreduce(&Min_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Min_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); Min_Delta_Time = rbuf_time; - SU2_MPI::Allreduce(&Max_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Max_Delta_Time, &rbuf_time, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); Max_Delta_Time = rbuf_time; } SU2_OMP_BARRIER @@ -513,7 +513,7 @@ class CFVMFlowSolverBase : public CSolver { SU2_OMP_MASTER { - SU2_MPI::Allreduce(&Global_Delta_UnstTimeND, &glbDtND, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&Global_Delta_UnstTimeND, &glbDtND, 1, MPI_DOUBLE, MPI_MIN, SU2_MPI::GetComm()); Global_Delta_UnstTimeND = glbDtND; config->SetDelta_UnstTimeND(Global_Delta_UnstTimeND); @@ -1068,8 +1068,8 @@ class CFVMFlowSolverBase : public CSolver { su2double 
MyOmega_Max = Omega_Max; su2double MyStrainMag_Max = StrainMag_Max; - SU2_MPI::Allreduce(&MyStrainMag_Max, &StrainMag_Max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); - SU2_MPI::Allreduce(&MyOmega_Max, &Omega_Max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&MyStrainMag_Max, &StrainMag_Max, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); + SU2_MPI::Allreduce(&MyOmega_Max, &Omega_Max, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); } SU2_OMP_BARRIER } diff --git a/SU2_CFD/src/SU2_CFD.cpp b/SU2_CFD/src/SU2_CFD.cpp index 25246cc1d21d..a73cb5126dc9 100644 --- a/SU2_CFD/src/SU2_CFD.cpp +++ b/SU2_CFD/src/SU2_CFD.cpp @@ -67,7 +67,7 @@ int main(int argc, char *argv[]) { #else SU2_MPI::Init(&argc, &argv); #endif - SU2_Comm MPICommunicator(MPI_COMM_WORLD); + SU2_MPI::Comm MPICommunicator = SU2_MPI::GetComm(); /*--- Uncomment the following line if runtime NaN catching is desired. ---*/ // feenableexcept(FE_INVALID | FE_OVERFLOW); diff --git a/SU2_CFD/src/solvers/CIncEulerSolver.cpp b/SU2_CFD/src/solvers/CIncEulerSolver.cpp index 3f40c1474929..85a14c212dce 100644 --- a/SU2_CFD/src/solvers/CIncEulerSolver.cpp +++ b/SU2_CFD/src/solvers/CIncEulerSolver.cpp @@ -835,7 +835,7 @@ void CIncEulerSolver::CommonPreprocessing(CGeometry *geometry, CSolver **solver_ SU2_OMP_MASTER { unsigned long tmp = ErrorCounter; - SU2_MPI::Allreduce(&tmp, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&tmp, &ErrorCounter, 1, MPI_UNSIGNED_LONG, MPI_SUM, SU2_MPI::GetComm()); config->SetNonphysical_Points(ErrorCounter); } SU2_OMP_BARRIER @@ -1660,7 +1660,7 @@ void CIncEulerSolver::SetBeta_Parameter(CGeometry *geometry, CSolver **solver_co SU2_OMP_MASTER { maxVel2 = MaxVel2; - SU2_MPI::Allreduce(&maxVel2, &MaxVel2, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&maxVel2, &MaxVel2, 1, MPI_DOUBLE, MPI_MAX, SU2_MPI::GetComm()); config->SetMax_Vel2(max(1e-10, MaxVel2)); } diff --git a/SU2_DEF/src/SU2_DEF.cpp b/SU2_DEF/src/SU2_DEF.cpp index ad0f705d1e2a..41852e1ef958 100644 --- a/SU2_DEF/src/SU2_DEF.cpp +++ b/SU2_DEF/src/SU2_DEF.cpp @@ -45,7 +45,7 @@ int main(int argc, char *argv[]) { #else SU2_MPI::Init(&argc, &argv); #endif - SU2_MPI::Comm MPICommunicator(MPI_COMM_WORLD); + SU2_MPI::Comm MPICommunicator = SU2_MPI::GetComm(); rank = SU2_MPI::GetRank(); size = SU2_MPI::GetSize(); diff --git a/SU2_DOT/src/SU2_DOT.cpp b/SU2_DOT/src/SU2_DOT.cpp index 61e3acb3d96c..6a1fb804e93c 100644 --- a/SU2_DOT/src/SU2_DOT.cpp +++ b/SU2_DOT/src/SU2_DOT.cpp @@ -44,7 +44,7 @@ int main(int argc, char *argv[]) { #else SU2_MPI::Init(&argc, &argv); #endif - SU2_MPI::Comm MPICommunicator(MPI_COMM_WORLD); + SU2_MPI::Comm MPICommunicator = SU2_MPI::GetComm(); const int rank = SU2_MPI::GetRank(); const int size = SU2_MPI::GetSize(); @@ -644,7 +644,7 @@ void SetProjection_FD(CGeometry *geometry, CConfig *config, CSurfaceMovement *su } } - SU2_MPI::Allreduce(&my_Gradient, &localGradient, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&my_Gradient, &localGradient, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); Gradient[iDV][0] += localGradient; } } @@ -770,7 +770,7 @@ void SetProjection_AD(CGeometry *geometry, CConfig *config, CSurfaceMovement *su for (iDV_Value = 0; iDV_Value < nDV_Value; iDV_Value++){ DV_Value = config->GetDV_Value(iDV, iDV_Value); my_Gradient = SU2_TYPE::GetDerivative(DV_Value); - SU2_MPI::Allreduce(&my_Gradient, &localGradient, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + SU2_MPI::Allreduce(&my_Gradient, &localGradient, 1, MPI_DOUBLE, MPI_SUM, SU2_MPI::GetComm()); /*--- 
Angle of Attack design variable (this is different, the value comes form the input file) ---*/ diff --git a/SU2_GEO/src/SU2_GEO.cpp b/SU2_GEO/src/SU2_GEO.cpp index 1e6b07815f5f..9bfff3e3fbf0 100644 --- a/SU2_GEO/src/SU2_GEO.cpp +++ b/SU2_GEO/src/SU2_GEO.cpp @@ -59,7 +59,7 @@ int main(int argc, char *argv[]) { /*--- MPI initialization ---*/ SU2_MPI::Init(&argc,&argv); - SU2_MPI::Comm MPICommunicator(MPI_COMM_WORLD); + SU2_MPI::Comm MPICommunicator = SU2_MPI::GetComm(); rank = SU2_MPI::GetRank(); size = SU2_MPI::GetSize(); diff --git a/SU2_SOL/src/SU2_SOL.cpp b/SU2_SOL/src/SU2_SOL.cpp index c0cfcf719e8b..68b4da4e4752 100644 --- a/SU2_SOL/src/SU2_SOL.cpp +++ b/SU2_SOL/src/SU2_SOL.cpp @@ -40,7 +40,7 @@ int main(int argc, char *argv[]) { /*--- MPI initialization ---*/ SU2_MPI::Init(&argc,&argv); - SU2_MPI::Comm MPICommunicator(MPI_COMM_WORLD); + SU2_MPI::Comm MPICommunicator = SU2_MPI::GetComm(); const int rank = SU2_MPI::GetRank(); const int size = SU2_MPI::GetSize(); diff --git a/UnitTests/test_driver.cpp b/UnitTests/test_driver.cpp index d53ef2b9063d..0fd92e5dc054 100644 --- a/UnitTests/test_driver.cpp +++ b/UnitTests/test_driver.cpp @@ -43,7 +43,7 @@ int main(int argc, char *argv[]) { #else SU2_MPI::Init(&argc, &argv); #endif - SU2_Comm MPICommunicator(MPI_COMM_WORLD); + SU2_MPI::Comm MPICommunicator = SU2_MPI::GetComm(); /*--- Run the test driver supplied by Catch ---*/ int result = Catch::Session().run(argc, argv);
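
A note on what the second patch enables (illustrative, not part of the diff): once every collective inside SU2 resolves its communicator through SU2_MPI::GetComm() rather than the global MPI_COMM_WORLD, an embedding application can in principle run the solver on a sub-communicator. The sketch below is a minimal pure-MPI example of that idea; the SU2_MPI::SetComm() call is left commented out and is an assumption here (a setter counterpart to GetComm() on the wrapper), as is the driver construction it would precede.

    #include <mpi.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);

      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      /* Split the world: even ranks form the communicator that would be
         handed to SU2, odd ranks stay free for a coupled code. */
      MPI_Comm subComm;
      MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &subComm);

      if (rank % 2 == 0) {
        /* Hypothetical usage: point the wrapper at the sub-communicator so
           that every Reduce/Bcast/Allreduce patched above runs on it.
           SU2_MPI::SetComm(subComm); */

        /* ... construct an SU2 driver on subComm and run it ... */
      }

      MPI_Comm_free(&subComm);
      MPI_Finalize();
      return 0;
    }

Because the substitution also covers the MPI_File_open and Allgather calls in CSolver.cpp above, restart I/O and residual gathering would stay on the selected communicator as well.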