diff --git a/CMakeLists.txt b/CMakeLists.txt
index c93e373d..a2b62df0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -252,7 +252,7 @@ IF ( DOWNLOAD_MMG )
   EXTERNALPROJECT_ADD ( Mmg
     GIT_REPOSITORY https://github.com/MmgTools/mmg.git
-    GIT_TAG 2263f92c
+    GIT_TAG 88d9d1ed6746a1e68713623859ca070224a5abbc
     INSTALL_COMMAND ${CMAKE_MAKE_PROGRAM} install
     CMAKE_ARGS ${MMG_ARGS} -DUSE_ELAS=OFF ${COMPILER_CFG} ${FLAGS_CFG} ${SCOTCH_CFG} ${VTK_CFG} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}

diff --git a/cmake/testing/pmmg_tests.cmake b/cmake/testing/pmmg_tests.cmake
index d820b149..83d6cdbf 100644
--- a/cmake/testing/pmmg_tests.cmake
+++ b/cmake/testing/pmmg_tests.cmake
@@ -441,6 +441,117 @@ IF( BUILD_TESTING )
       ${CI_DIR}/Cube/internaltriangles-P3.mesh -v 10
       -out ${CI_DIR_RESULTS}/internaltriangles-P3.o.mesh)

+  ###############################################################################
+  #####
+  #####         Tests overlap
+  #####
+  ###############################################################################
+  # Test if overlap is created
+  set(overlapCreation "## Create Overlap.")
+
+  add_test( NAME overlap-create
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -out ${CI_DIR_RESULTS}/overlap-create.o.mesh)
+  set_property(TEST overlap-create
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapCreation}")
+
+  # Test if overlap is deleted
+  set(overlapDelete "## Delete Overlap.")
+
+  add_test( NAME overlap-delete
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -out ${CI_DIR_RESULTS}/overlap-delete.o.mesh)
+  set_property(TEST overlap-delete
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapDelete}")
+
+  # Test if the overlap is created correctly
+  set(overlapCheckP0P1 "OVERLAP - part 0 sends 74 pts and 257 tetra to part 1")
+  set(overlapCheckP0P2 "OVERLAP - part 0 sends 29 pts and 110 tetra to part 2")
+  set(overlapCheckP0P3 "OVERLAP - part 0 sends 61 pts and 204 tetra to part 3")
+  set(overlapCheckP0P4 "OVERLAP - part 0 sends 28 pts and 66 tetra to part 4")
+  set(overlapCheckP0   "OVERLAP - part 0 has 433 pts and 1492 tetras after overlap creation")
+
+  add_test( NAME overlap-check-P0P1
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -out ${CI_DIR_RESULTS}/overlap-check-P0P1.o.mesh)
+  set_property(TEST overlap-check-P0P1
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0P1}")
+
+  add_test( NAME overlap-check-P0P2
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -out ${CI_DIR_RESULTS}/overlap-check-P0P2.o.mesh)
+  set_property(TEST overlap-check-P0P2
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0P2}")
+
+  add_test( NAME overlap-check-P0P3
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -out ${CI_DIR_RESULTS}/overlap-check-P0P3.o.mesh)
+  set_property(TEST overlap-check-P0P3
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0P3}")
+  add_test( NAME overlap-check-P0P4
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -out ${CI_DIR_RESULTS}/overlap-check-P0P4.o.mesh)
+  set_property(TEST overlap-check-P0P4
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0P4}")
+
+  add_test( NAME overlap-check-P0
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -out ${CI_DIR_RESULTS}/overlap-check-P0.o.mesh)
+  set_property(TEST overlap-check-P0
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0}")
+
+  add_test( NAME overlap-check-P0-met
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -met ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-metric.sol
+    -out ${CI_DIR_RESULTS}/overlap-check-P0-met.o.mesh)
+  set_property(TEST overlap-check-P0-met
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0}")
+
+  # Test if the overlap is deleted correctly
+  set(overlapCheckDelete "OVERLAP - part 0 has 282 pts and 882 tetras after overlap deletion")
+  add_test( NAME overlap-check-delete
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -out ${CI_DIR_RESULTS}/overlap-check-delete.o.mesh)
+  set_property(TEST overlap-check-delete
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckDelete}")
+
+  add_test( NAME overlap-check-delete-met
+    COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $<TARGET_FILE:${PROJECT_NAME}>
+    ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1
+    -ls 0.0
+    -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol
+    -out ${CI_DIR_RESULTS}/overlap-check-delete-met.o.mesh)
+  set_property(TEST overlap-check-delete-met
+    PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckDelete}")
+
   ###############################################################################
   #####
   #####         Test isovalue mode - ls discretization

diff --git a/src/analys_pmmg.c b/src/analys_pmmg.c
index 6a4d46e0..235cc2cb 100644
--- a/src/analys_pmmg.c
+++ b/src/analys_pmmg.c
@@ -90,7 +90,7 @@ typedef struct {
   MMG5_pxTetra pxt;
   MMG5_pPoint  ppt;
   double       n[3];
-  int16_t      tag;
+  uint16_t     tag;
   int          ie,ifac,iloc,iadj;
   int          ip,ip1,ip2;
   int          updloc,updpar;
@@ -110,7 +110,7 @@
  */
 static inline
 int PMMG_hGetOri( MMG5_HGeom *hash,int ip0,int ip1,int *ref,int16_t *color ) {
-  int16_t tag;
+  uint16_t tag;

   /* Get edge from hash table */
   if( !MMG5_hGet( hash,
@@ -137,7 +137,7 @@ int PMMG_hGetOri( MMG5_HGeom *hash,int ip0,int ip1,int *ref,int16_t *color ) {
  */
 static inline
 int PMMG_hTagOri( MMG5_HGeom *hash,int ip0,int ip1,int ref,int16_t color ) {
-  int16_t tag;
+  uint16_t tag;

   /* Set bitwise tag from color */
   if( color ) {
@@ -273,8 +273,8 @@ int PMMG_hashNorver_loop( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var,int16_t ski
  */
 static inline
 int PMMG_hash_nearParEdges( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) {
-  int      ia[2],ip[2],j;
-  int16_t  tag;
+  int      ia[2],ip[2],j;
+  uint16_t tag;

   /* Get points */
   ia[0] =
MMG5_iarf[var->ifac][MMG5_iprv2[var->iloc]]; @@ -312,8 +312,8 @@ int PMMG_hashNorver_edges( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { double *doublevalues; int ia[2],ip[2],gip; int *intvalues,idx,d,edg,j,pos; - int16_t tag; - int8_t found; + uint16_t tag; + int8_t found; doublevalues = parmesh->int_node_comm->doublevalues; intvalues = parmesh->int_node_comm->intvalues; @@ -383,7 +383,7 @@ static inline int PMMG_hashNorver_switch( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { int idx; int ia[2],ip[2],j; - int16_t tag; + uint16_t tag; /* Only process ridge points */ if( !(var->ppt->tag & MG_GEO ) ) return 1; @@ -1207,7 +1207,7 @@ int PMMG_set_edge_owners( PMMG_pParMesh parmesh,MMG5_HGeom *hpar,MPI_Comm comm ) MMG5_pEdge pa; int *intvalues,*itosend,*itorecv; int idx,k,nitem,color,edg,ia,ie,ifac,ip[2],i; - int16_t tag; + uint16_t tag; MPI_Status status; assert( parmesh->ngrp == 1 ); @@ -1478,7 +1478,7 @@ int PMMG_hashNorver( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hash, } static inline -int MMG5_skip_nonOldParBdy ( int8_t tag ) { +uint16_t MMG5_skip_nonOldParBdy ( uint16_t tag ) { return !(tag & MG_OLDPARBDY); } @@ -1561,7 +1561,7 @@ int PMMG_loopr(PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { MMG5_pPoint ppt[2]; double *doublevalues; int *intvalues,ip[2],k,j,idx,ns0,edg,d; - int16_t tag; + uint16_t tag; int8_t isEdg; /* Get node communicator */ @@ -1960,7 +1960,7 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash,MPI_Comm int k,ne,nr,nm,j; int i,i1,i2; int idx,edg,d; - int16_t tag; + uint16_t tag; MPI_Status status; assert( parmesh->ngrp == 1 ); diff --git a/src/boulep_pmmg.c b/src/boulep_pmmg.c index 1922cc4d..7d896b0b 100644 --- a/src/boulep_pmmg.c +++ b/src/boulep_pmmg.c @@ -242,7 +242,7 @@ int PMMG_boulen(PMMG_pParMesh parmesh,MMG5_pMesh mesh,int start,int ip,int iface double dd,l0,l1; int base,nump,nr,nnm,k,piv,na,nb,adj,nvstart,fstart,aux,ip0,ip1; int *adja,color; - int16_t tag; + uint16_t tag; int8_t iopp,ipiv,indb,inda,i,isface; int8_t indedg[4][4] = { {-1,0,1,2}, {0,-1,3,4}, {1,3,-1,5}, {2,4,5,-1} }; diff --git a/src/communicators_pmmg.c b/src/communicators_pmmg.c index 94b9518c..570a0d81 100644 --- a/src/communicators_pmmg.c +++ b/src/communicators_pmmg.c @@ -262,7 +262,7 @@ int PMMG_fillExtEdgeComm_fromFace( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HG PMMG_pExt_comm ext_edge_comm,MMG5_pTetra pt,int ifac,int iloc,int j,int color,int *item ) { MMG5_pEdge pa; int edg; - int16_t tag; + uint16_t tag; int8_t i1,i2; /* Take the edge opposite to vertex iloc+j on face ifac */ @@ -661,7 +661,7 @@ int PMMG_build_edgeComm( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hpar, MMG5_hgeom *ph; int *nitems_ext_comm,color,k,i,idx,ie,ifac,iloc,j,item; int edg; - int16_t tag; + uint16_t tag; int8_t ia,i1,i2; assert( parmesh->ngrp == 1 ); diff --git a/src/debug_pmmg.c b/src/debug_pmmg.c index 0e20bb39..ff364a1b 100644 --- a/src/debug_pmmg.c +++ b/src/debug_pmmg.c @@ -455,7 +455,7 @@ int PMMG_saveQual(MMG5_pMesh mesh, const char *filename) { * Write group surface mesh and edges with given tag in medit format. 
 *
 */
-int PMMG_grp_to_saveEdges( PMMG_pParMesh parmesh,int grpId,int16_t tag,char *basename ) {
+int PMMG_grp_to_saveEdges( PMMG_pParMesh parmesh,int grpId,uint16_t tag,char *basename ) {
   PMMG_pGrp  grp;
   MMG5_pMesh mesh;
   MMG5_pTria ptr;

diff --git a/src/debug_pmmg.h b/src/debug_pmmg.h
index 1e58df33..d61d8775 100644
--- a/src/debug_pmmg.h
+++ b/src/debug_pmmg.h
@@ -32,7 +32,7 @@ void PMMG_grplst_meshes_to_txt( char *name, PMMG_pGrp grp, int ngrp );
 void PMMG_tetras_of_mesh_to_txt( char *name, MMG5_pMesh mesh, int num );
 void PMMG_find_tetras_referencing_null_points_to_txt( char *name, PMMG_pGrp grp, int nmsh );
 void PMMG_listgrp_meshes_adja_of_tetras_to_txt( char *name, PMMG_pGrp grp, int ngrp );
-int PMMG_grp_to_saveEdges( PMMG_pParMesh parmesh,int grpId,int16_t tag,char *basename );
+int PMMG_grp_to_saveEdges( PMMG_pParMesh parmesh,int grpId,uint16_t tag,char *basename );
 int PMMG_grp_to_saveMesh( PMMG_pParMesh parmesh, int grpId, char *basename );
 int PMMG_listgrp_to_saveMesh( PMMG_pParMesh parmesh, char *basename );
 int PMMG_listgrp_quality_to_saveMesh( PMMG_pParMesh parmesh, char *basename );

diff --git a/src/hash_pmmg.c b/src/hash_pmmg.c
index b9c98a62..dd32e87c 100644
--- a/src/hash_pmmg.c
+++ b/src/hash_pmmg.c
@@ -239,7 +239,7 @@ int PMMG_bdryUpdate( MMG5_pMesh mesh )
   MMG5_pxTetra pxt;
   MMG5_HGeom   hash;
   int          k,edg;
-  int16_t      tag;
+  uint16_t     tag;
   int8_t       i,i1,i2;

diff --git a/src/libparmmg.c b/src/libparmmg.c
index ff31486e..9fd6201a 100644
--- a/src/libparmmg.c
+++ b/src/libparmmg.c
@@ -375,6 +375,7 @@ int PMMG_preprocessMesh_distributed( PMMG_pParMesh parmesh )
     if ( !PMMG_build_nodeCommFromFaces(parmesh,parmesh->info.read_comm) ) {
       return PMMG_STRONGFAILURE;
     }
+    break;

   case PMMG_APIDISTRIB_nodes :
@@ -406,6 +407,7 @@
     if ( !PMMG_ls(parmesh) ) {
       return PMMG_STRONGFAILURE;
     }
+
     chrono(OFF,&(ctim[tim]));
     printim(ctim[tim].gdif,stim);
     if ( parmesh->info.imprim > PMMG_VERB_VERSION ) {
@@ -1103,6 +1105,7 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh,MPI_Comm comm ){
   /* Store owner in the point flag */
   for( ip = 1; ip <= mesh->np; ip++ ) {
     ppt = &mesh->point[ip];
+    if (ppt->tag & MG_OVERLAP) continue;
     ppt->flag = parmesh->myrank;
   }

@@ -1138,13 +1141,13 @@
   counter = 0;
   for( ip = 1; ip <= mesh->np; ip++ ) {
     ppt = &mesh->point[ip];
+    if (ppt->tag & MG_OVERLAP) continue;
     if( ppt->flag != parmesh->myrank ) continue;
     ppt->tmp = ++counter+offsets[parmesh->myrank];
     assert(ppt->tmp);
   }
   assert( counter == nowned );

-
   /** Step 2: Communicate global numbering */

   /* Store numbering in the internal communicator */
@@ -1243,6 +1246,7 @@
 #ifndef NDEBUG
   for( ip = 1; ip <= mesh->np; ip++ ) {
     ppt = &mesh->point[ip];
+    if (ppt->tag & MG_OVERLAP) continue;
     assert(ppt->tmp > 0);
     assert(ppt->tmp <= offsets[parmesh->nprocs]);
   }
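Across these hunks (and the matching ones in boulep_pmmg.c, communicators_pmmg.c, mergemesh_pmmg.c, quality_pmmg.c and tag_pmmg.c), every `tag` variable is widened from `int16_t` to `uint16_t`. The diff itself does not state the motivation, but a plausible reading is that the tag bit field now needs its highest bit (for instance for the new MG_OVERLAP flag) and bit 15 of a signed 16-bit integer misbehaves under integer promotion. A minimal sketch of that pitfall; `MG_OVERLAP_BIT` and its value `1 << 15` are assumptions for illustration, not taken from the diff:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag value for illustration only: a tag bit that sits in the
 * sign bit of a 16-bit integer. */
#define MG_OVERLAP_BIT (1 << 15)

int main(void) {
  int16_t  s = (int16_t)MG_OVERLAP_BIT;  /* wraps to -32768 */
  uint16_t u = (uint16_t)MG_OVERLAP_BIT; /* stays 32768 */

  /* Bit tests still work on the signed variant... */
  printf("signed bit test:   %d\n", (s & MG_OVERLAP_BIT) != 0); /* 1 */
  /* ...but equality against the promoted mask silently fails, because s
   * sign-extends to -32768 while MG_OVERLAP_BIT promotes to +32768. */
  printf("signed equality:   %d\n", s == MG_OVERLAP_BIT);       /* 0 */
  printf("unsigned equality: %d\n", u == MG_OVERLAP_BIT);       /* 1 */
  return 0;
}
```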
diff --git a/src/libparmmgtypes.h b/src/libparmmgtypes.h
index 8aea1c24..6b7cd9f0 100644
--- a/src/libparmmgtypes.h
+++ b/src/libparmmgtypes.h
@@ -382,6 +382,22 @@ typedef struct {
   MPI_Comm read_comm; /*!< MPI comm containing the procs that read the mesh (HDF5 input) */
 } PMMG_Info;

+/**
+ * \struct PMMG_Overlap
+ * \brief Overlap structure.
+ */
+typedef struct {
+  int color_in;     /*!< Color of the hosting processor */
+  int color_out;    /*!< Color of the remote processor */
+  int np_in2out;    /*!< Nbr of points sent from color_in to color_out */
+  int np_out2in;    /*!< Nbr of points received on color_in from color_out */
+  int nt_in2out;    /*!< Nbr of tetra sent from color_in to color_out */
+  int nt_out2in;    /*!< Nbr of tetra received on color_in from color_out */
+  int *hash_in2out; /*!< Hash table to find the pts index on color_out from the pts index on color_in */
+  int *hash_out2in; /*!< Hash table to find the pts index on color_in from the pts index on color_out */
+
+} PMMG_Overlap;
+typedef PMMG_Overlap * PMMG_pOverlap;

 /**
  * \struct PMMG_ParMesh
@@ -416,7 +432,6 @@ typedef struct {
   int       nold_grp;    /*!< Number of old grp */
   PMMG_pGrp old_listgrp; /*!< List of old grp */

-
   /* internal communicators */
   PMMG_pInt_comm int_node_comm; /*!< Internal node communicator (only one PMMG_Int_comm, it is not an array) */
   PMMG_pInt_comm int_edge_comm; /*!< Internal edge communicator */
@@ -430,6 +445,9 @@
   int            next_face_comm; /*!< Number of external face communicator */
   PMMG_pExt_comm ext_face_comm;  /*!< External communicators (in increasing order w.r. to the remote proc index) */

+  /* overlap variables */
+  PMMG_pOverlap overlap; /*!< Overlap variables */
+
   /* global variables */
   int ddebug; //! Debug level
   int iter;   //! Current adaptation iteration

diff --git a/src/ls_pmmg.c b/src/ls_pmmg.c
index 4218f1f5..2a0c17a6 100644
--- a/src/ls_pmmg.c
+++ b/src/ls_pmmg.c
@@ -162,6 +162,7 @@ if ( parmesh->myrank == parmesh->info.root )
   /* Loop over tetra */
   for (k=1; k<=mesh->ne; k++) {
     pt = &mesh->tetra[k];
+    if (!MG_EOK(pt)) continue;
     if ( !MG_EOK(pt) ) {
       continue;
@@ -1103,7 +1104,7 @@ void PMMG_split1_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,uint8_t tau[4],
   /* Tetra #0 created by MMG5_split1 */
   tetra_sorted[0] = k;
   /* Except for the following: tetra #1 created by MMG5_split1 */
-  if ( (ifac==tau[0]) ) tetra_sorted[0] = ne_tmp;
+  if ( ifac==tau[0] ) tetra_sorted[0] = ne_tmp;

   /* Store index of the node with highest coordinates in node_sorted[0]
      and sort the vertices by increasing order in v_t0 */
@@ -1603,12 +1604,22 @@ int PMMG_ls(PMMG_pParMesh parmesh) {
   for (k=1; k<= sol->np; k++)
     sol->m[k] -= mesh->info.ls;

+  /* Create overlap */
+  if ( !PMMG_create_overlap(parmesh,parmesh->info.read_comm) ) {
+    return PMMG_STRONGFAILURE;
+  }
+
   /** \todo TODO :: Snap values of level set function if needed */
   if ( !PMMG_snpval_ls(parmesh,mesh,sol) ) {
     fprintf(stderr,"\n  ## Problem with implicit function. Exit program.\n");
     return 0;
   }

+  /* Delete overlap */
+  if ( !PMMG_delete_overlap(parmesh,parmesh->info.read_comm) ) {
+    return PMMG_STRONGFAILURE;
+  }
+
   /* Create table of adjacency for tetra */
   if ( !MMG3D_hashTetra(mesh,1) ) {
     fprintf(stderr,"\n  ## Hashing problem. Exit program.\n");
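The two arrays in PMMG_Overlap above are direct-addressed lookup tables rather than hashed maps: entry `ip` of `hash_in2out` holds the index that local point `ip` carries on the remote partition, and `hash_out2in` holds the reverse mapping. A minimal sketch of the round trip, with a hypothetical helper name; the fields themselves are filled by PMMG_create_overlap later in this patch:

```c
/* Hypothetical helper: map a local point index to the remote partition's
 * numbering and back. Entries are 0 when no correspondence exists. */
static int overlap_roundTrip( PMMG_pOverlap ovlp, int ip_in ) {
  int ip_out = ovlp->hash_in2out[ip_in];  /* local  -> remote numbering */
  if ( !ip_out ) return 0;                /* point not part of the overlap */
  return ovlp->hash_out2in[ip_out];       /* remote -> local, equals ip_in */
}
```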
@@ -1654,10 +1665,12 @@ int PMMG_ls(PMMG_pParMesh parmesh) {
   }

 #ifdef USE_POINTMAP
-  /* OK - Initialize source point with input index */
+  /* Initialize source point with input index */
   MMG5_int ip;
-  for( ip = 1; ip <= mesh->np; ip++ )
-    mesh->point[ip].src = ip;
+  for( ip = 1; ip <= mesh->np; ip++ ) {
+    if ( (!MG_VOK(&mesh->point[ip])) ) continue;
+    mesh->point[ip].src = ip;
+  }
 #endif

   /* Compute vertices global numerotation

diff --git a/src/mergemesh_pmmg.c b/src/mergemesh_pmmg.c
index d2c8667d..ff450f20 100644
--- a/src/mergemesh_pmmg.c
+++ b/src/mergemesh_pmmg.c
@@ -190,7 +190,7 @@ int PMMG_check_solsNpmax(MMG5_pMesh mesh,MMG5_pSol met,MMG5_pSol ls,
  */
 static inline
 int PMMG_realloc_pointAndSols(MMG5_pMesh mesh,MMG5_pSol met,MMG5_pSol ls,
-                              MMG5_pSol disp,MMG5_pSol field,double *c,int16_t tag,int src) {
+                              MMG5_pSol disp,MMG5_pSol field,double *c,uint16_t tag,int src) {
   MMG5_pSol psl;
   int ip = 0;
   int oldnpmax = mesh->npmax;

diff --git a/src/mpi_pmmg.h b/src/mpi_pmmg.h
index a4d08ef6..ddc6ce37 100644
--- a/src/mpi_pmmg.h
+++ b/src/mpi_pmmg.h
@@ -48,6 +48,7 @@
 #define MPI_COMMUNICATORS_REF_TAG 9000
 #define MPI_ANALYS_TAG            10000
 #define MPI_MERGEMESH_TAG         11000
+#define MPI_OVERLAP_TAG           12000

 #define MPI_CHECK(func_call,on_failure) do { \
     int mpi_ret_val; \

diff --git a/src/overlap_pmmg.c b/src/overlap_pmmg.c
new file mode 100644
index 00000000..ab8eb471
--- /dev/null
+++ b/src/overlap_pmmg.c
@@ -0,0 +1,805 @@
+/* =============================================================================
+**  This file is part of the parmmg software package for parallel tetrahedral
+**  mesh modification.
+**  Copyright (c) Bx INP/Inria/UBordeaux, 2017-
+**
+**  parmmg is free software: you can redistribute it and/or modify it
+**  under the terms of the GNU Lesser General Public License as published
+**  by the Free Software Foundation, either version 3 of the License, or
+**  (at your option) any later version.
+**
+**  parmmg is distributed in the hope that it will be useful, but WITHOUT
+**  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+**  FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+**  License for more details.
+**
+**  You should have received a copy of the GNU Lesser General Public
+**  License and of the GNU General Public License along with parmmg (in
+**  files COPYING.LESSER and COPYING). If not, see
+**  <http://www.gnu.org/licenses/>. Please read their terms carefully and
+**  use this copy of the parmmg distribution only if you accept them.
+** =============================================================================
+*/
+
+/**
+ * \file overlap_pmmg.c
+ * \brief Create and delete overlap.
+ * \author Cécile Dobrzynski (Bx INP/Inria/UBordeaux)
+ * \author Algiane Froehly (InriaSoft)
+ * \author Laetitia Mottet (UBordeaux)
+ * \version 1
+ * \copyright GNU Lesser General Public License.
+ *
+ * Functions to create and delete the overlap.
+ *
+ */
+
+#include "parmmg.h"
+#include "mmgexterns_private.h"
+#include "inlined_functions_3d_private.h"
+
+/**
+ * \param parmesh pointer toward a parmesh structure
+ * \param comm MPI communicator for ParMmg
+ *
+ * \return 1 if success, 0 if fail.
+ *
+ * Create the overlap. The overlap consists of sending to and receiving from
+ * neighbouring partitions one extra layer of points and associated tetrahedra.
+ * + * \remark Data transferred between partitions: + * - mesh->point.c, mesh->point.tag and mesh->point.ref + * - mesh->tetra.v and mesh->tetra.ref + * - mesh->ls + * Data NOT transferred between partitions: + * - Other mesh->point and mesh->tetra fields + * - mesh->xtetra fields + * + */ +int PMMG_create_overlap(PMMG_pParMesh parmesh, MPI_Comm comm) { + + /* Local variables + Remark: *Interior_*: pts not tagged MG_PARBDY + *PBDY_* : pts tagged MG_PARBDY + *in2out: data from color_in sends to color_out + *out2in: data on color_in receives from color_out */ + PMMG_pInt_comm int_comm; + PMMG_pExt_comm ext_comm,ext_comm_ter; + PMMG_pGrp grp; + MMG5_pMesh mesh; + MMG5_pSol ls; + PMMG_pOverlap overlap; + MPI_Status status; + MMG5_pPoint p0; + MMG5_pTetra pt; + + int ier = 1; // Initialize error + int local_mpi_tag; // Tag used for MPI comm + int i,j,k,r; + int idx, ip, ip_in,ip_out,ip_ter,pos; + int icomm,icomm_ter; + int ref; + int duplicated_point; + int nitem_ext,nitem_ext_ter,next_comm; + int color_in,color_out,color_ter; + double coord[3],ls_val; + uint16_t tag; + + int *n_ToSend, *n_ToRecv; // Tables to send/receive nbr of points and tetras + int *hash_in2out, *hash_out2in; // Hash table needed in structure PMMG_pOverlap + double *lsInterior_ToSend, *lsPBDY_ToSend, *lsInterior_ToRecv, *lsPBDY_ToRecv; // LS values to send/receive + + /* Number of tetras */ + int nt_initial; // Initial tetras nbr on Pcolor_in + int ntTot_in2out; // Total tetras nbr from Pcolor_in sends to Pcolor_out + int ntTot_out2in; // Total tetras nbr on Pcolor_in receives from Pcolor_out + + /* Tetras vertices index and ref to send/receive */ + int *tetraVertices_ToSend; // Indices of tetra vertices from Pcolor_in sends to Pcolor_out + int *tetraVertices_ToRecv_inIdx; // Indices of tetra vertices from Pcolor_out on Pcolor_in + int *tetraVertices_ToRecv_outIdx; // Indices of tetra vertices from Pcolor_out on Pcolor_out + int *tetraRef_ToSend, *tetraRef_ToRecv; + int *tetraVerticesSeen_ToSend, *tetraVerticesSeen_ToRecv; // Flag tetra vertices + + /* Number of points */ + int np_in, np_out; // Nbr of pts on Pcolor_in or Pcolor_out + int npInterior_in2out; // Nbr of pts not tagged MG_PARBDY from Pcolor_in sends to Pcolor_out + int npPBDY_in2out; // Nbr of pts tagged MG_PARBDY from Pcolor_in sends to Pcolor_out + int npInterior_out2in; // Nbr of pts not tagged MG_PARBDY from Pcolor_out receives on Pcolor_in + int npPBDY_out2in; // Nbr of pts tagged MG_PARBDY from Pcolor_out receives on Pcolor_in + int npTot_in2out; // Total nbr of pts from Pcolor_in sends to Pcolor_out npTot=npInterior+npPBDY + int npTot_out2in; // Total nbr of pts from Pcolor_out receives on Pcolor_in npTot=npInterior+npPBDY + + /* Points coordinates, tag, index and ref to send/receive */ + double *pointCoordInterior_ToSend, *pointCoordPBDY_ToSend; + double *pointCoordInterior_ToRecv, *pointCoordPBDY_ToRecv; + uint16_t *pointTagInterior_ToSend, *pointTagPBDY_ToSend; + uint16_t *pointTagInterior_ToRecv, *pointTagPBDY_ToRecv; + int *pointRefInterior_ToSend, *pointRefPBDY_ToSend; + int *pointRefInterior_ToRecv, *pointRefPBDY_ToRecv; + int *pointIdxPBDY_ToSend, *pointIdxInterface_ToSend; + int *pointIdxPBDY_ToRecv, *pointIdxInterface_ToRecv; + + /* Data needed to identify MG_PARBDY pts located on Pcolor_ter + and ensure they are added only once in Pcolor_in */ + int ndataPBDY_in2out, ndataPBDY_out2in, ndataPBDY_added; // Nbr of MG_PARBDY points + int *dataPBDY_ToSend, *dataPBDY_ToRecv, *dataPBDY_AlreadyAdded; // Data to identify MG_PARBDY points + + if ( 
parmesh->info.imprim > PMMG_VERB_VERSION )
+    fprintf(stdout,"\n ## Create Overlap.\n");
+
+  /* Creation of the overlap works only when there is one group */
+  /* Ensure only one group on each proc */
+  assert ( (parmesh->ngrp == 1 || parmesh->ngrp == 0)
+           && "Overlap not implemented for more than 1 group per rank");
+
+  /* Global initialization */
+  grp  = &parmesh->listgrp[0];
+  mesh = parmesh->listgrp[0].mesh;
+  ls   = parmesh->listgrp[0].ls;
+  next_comm = parmesh->next_node_comm; // Nbr of external node communicators
+  int_comm  = parmesh->int_node_comm;  // Internal node communicator
+  nt_initial = mesh->ne;
+  ndataPBDY_added = 0;
+
+  /* Reset flags */
+  for (k=1; k<=mesh->np; k++)
+    mesh->point[k].flag = 0;
+
+  /* Global allocation memory */
+  PMMG_CALLOC(parmesh,int_comm->intvalues,int_comm->nitem,int,"intvalues",return 0);
+  PMMG_CALLOC(parmesh,parmesh->overlap,next_comm,PMMG_Overlap,"overlap",ier = 0);
+  PMMG_CALLOC(parmesh,dataPBDY_AlreadyAdded,5*mesh->np,int,"dataPBDY_AlreadyAdded",ier = 0);
+
+  PMMG_CALLOC(parmesh,n_ToSend,6,int,"n_ToSend",ier = 0);
+  PMMG_CALLOC(parmesh,n_ToRecv,6,int,"n_ToRecv",ier = 0);
+  PMMG_CALLOC(parmesh,pointCoordInterior_ToSend, 3*mesh->np, double,  "pointCoordInterior_ToSend",ier = 0);
+  PMMG_CALLOC(parmesh,pointCoordPBDY_ToSend,     3*mesh->np, double,  "pointCoordPBDY_ToSend",    ier = 0);
+  PMMG_CALLOC(parmesh,pointTagInterior_ToSend,   mesh->np,   uint16_t,"pointTagInterior_ToSend",  ier = 0);
+  PMMG_CALLOC(parmesh,pointTagPBDY_ToSend,       mesh->np,   uint16_t,"pointTagPBDY_ToSend",      ier = 0);
+  PMMG_CALLOC(parmesh,pointRefInterior_ToSend,   mesh->np,   int,     "pointRefInterior_ToSend",  ier = 0);
+  PMMG_CALLOC(parmesh,pointRefPBDY_ToSend,       mesh->np,   int,     "pointRefPBDY_ToSend",      ier = 0);
+  PMMG_CALLOC(parmesh,pointIdxPBDY_ToSend,       mesh->np,   int,     "pointIdxPBDY_ToSend",      ier = 0);
+  PMMG_CALLOC(parmesh,pointIdxInterface_ToSend,  mesh->np,   int,     "pointIdxInterface_ToSend", ier = 0);
+  PMMG_CALLOC(parmesh,tetraVertices_ToSend,      4*mesh->ne, int,     "tetraVertices_ToSend",     ier = 0);
+  PMMG_CALLOC(parmesh,tetraVerticesSeen_ToSend,  4*mesh->ne, int,     "tetraVerticesSeen_ToSend", ier = 0);
+  PMMG_CALLOC(parmesh,tetraRef_ToSend,           mesh->ne,   int,     "tetraRef_ToSend",          ier = 0);
+  PMMG_CALLOC(parmesh,lsInterior_ToSend,         mesh->np,   double,  "lsInterior_ToSend",        ier = 0);
+  PMMG_CALLOC(parmesh,lsPBDY_ToSend,             mesh->np,   double,  "lsPBDY_ToSend",            ier = 0);
+
+  /** STEP 1 - Store the point index in the internal communicator intvalues.
+      Since we have only one group (see the assert at the beginning of the function),
+      we can store the node id directly in the internal comm int_comm->intvalues,
+      as we know it won't be overwritten by another group. Indeed, entities shared
+      by the groups have a unique position in this buffer. In short, the next steps
+      work because we have only one group when we are in this function. **/
+  for( i = 0; i < grp->nitem_int_node_comm; i++ ){
+    ip  = grp->node2int_node_comm_index1[i];
+    pos = grp->node2int_node_comm_index2[i];
+    int_comm->intvalues[pos] = ip;
+  }
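STEP 1 relies on the standard ParMmg communicator indirection: `index1` stores local mesh indices, `index2` the matching slots of the shared internal buffer, and each external communicator addresses that same buffer through `int_comm_index`. A minimal sketch of the lookup used by the following steps, with a hypothetical helper name:

```c
/* Hypothetical helper: local mesh index of the i-th node that this partition
 * shares with the neighbour described by ext_comm, assuming intvalues was
 * filled as in STEP 1 above. */
static int overlap_sharedNode( PMMG_pExt_comm ext_comm,
                               PMMG_pInt_comm int_comm, int i ) {
  int pos = ext_comm->int_comm_index[i]; /* slot in the internal buffer    */
  return int_comm->intvalues[pos];       /* local point index stored there */
}
```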
+
+  /* Loop over the number of external node communicators */
+  for (icomm=0; icomm<next_comm; icomm++) {
+
+    /* Local initialization */
+    local_mpi_tag = 1;
+    npInterior_in2out = 0;
+    npPBDY_in2out = 0;
+    ntTot_in2out = 0;
+    ndataPBDY_in2out = 0;
+    np_in = mesh->np;
+
+    /* Get external node communicator information */
+    ext_comm  = &parmesh->ext_node_comm[icomm]; // External node communicator
+    color_in  = ext_comm->color_in;  // Color of this partition Pcolor_in
+    color_out = ext_comm->color_out; // Color of the remote partition Pcolor_out
+    nitem_ext = ext_comm->nitem;     // Nbr of nodes in common between Pcolor_in and Pcolor_out
+
+    /* Overlap variables */
+    overlap = &parmesh->overlap[icomm];
+    overlap->color_in  = color_in;
+    overlap->color_out = color_out;
+
+    /** STEP 2 - Identify the nodes at the interface between Pcolor_in and Pcolor_out
+          a) Assign flag -1 to these nodes
+          b) Store the local point indices in pointIdxInterface_ToSend
+             to be able to fill hash_out2in
+        Note: The following works because we have only one group in this function
+        (see the assert at the beginning of the function) and we have been able
+        to store the local index of each node **/
+    /* Loop over the nodes in the external node communicator */
+    for (i=0; i < nitem_ext; i++) {
+      /* Get the index of the node stored in the internal communicator */
+      pos = ext_comm->int_comm_index[i];
+      ip  = int_comm->intvalues[pos];
+
+      /* Add the flag -1 to this point */
+      p0 = &mesh->point[ip];
+      p0->flag = -1;
+      pointIdxInterface_ToSend[i] = ip;
+    }
+
+    /** STEP 3 - Identification of the points and tetras to send to Pcolor_out.
+        tetraVerticesSeen_ToSend flags the tetra vertices using the following rules:
+          +1 means the vertex is seen for the first time;
+           0 means the vertex has already been seen (from another tetra)
+             OR is on the interface between Pcolor_in and Pcolor_out
+             OR is tagged MG_PARBDY (special treatments for those) **/
+    /* Loop over the tetras on this partition, i.e. Pcolor_in */
+    for (k=1; k<=nt_initial; k++) {
+      pt = &mesh->tetra[k];
+      if (!MG_EOK(pt)) continue;
+
+      /* If one vertex has flag -1, assign MG_OVERLAP to this tetra */
+      for (i=0; i<4; i++) {
+        ip = pt->v[i];
+        p0 = &mesh->point[ip];
+        if ( p0->flag < 0 ) {
+          pt->tag |= MG_OVERLAP;
+          break;
+        }
+      }
+
+      /* If this tetra has not been identified as MG_OVERLAP, then ignore it */
+      if ( !(pt->tag & MG_OVERLAP) ) continue;
+
+      tetraRef_ToSend[ntTot_in2out] = pt->ref;
+
+      /* Loop over the vertices of this tetra; all the nodes belong to the overlap
+         that we want to send to Pcolor_out */
+      for (i=0; i<4; i++) {
+        ip = pt->v[i];
+        p0 = &mesh->point[ip];
+        tetraVertices_ToSend[4*ntTot_in2out+i] = ip;
+
+        /* If this node has never been seen before for the partition on Pcolor_out
+           (i.e. p0->flag!=color_out+1) and is not on the interface between Pcolor_in
+           and Pcolor_out (p0->flag>=0):
+             a) in VerticesSeen_ToSend: assign 1 if this node is not tagged MG_PARBDY;
+                assign 0 otherwise (special treatment for those).
+             b) store its coordinates in pointCoord*_ToSend, its tag in
+                pointTag*_ToSend, its ref in pointRef*_ToSend and its level-set value
+                in ls*_ToSend.
`*` being either `Interior` for pts not tagged + MG_PARBDY or `PBDY` for pts tagged MG_PARBDY */ + if ( (p0->flag>=0) && (p0->flag!=color_out+1) ) { + + /* Update flag of this vertex to identify it has already been seen */ + p0->flag = color_out+1; + + /* If this vertex is not tagged MG_PARBDY, store needed info in *Interior* to be sent to Pcolor_out */ + if (!(p0->tag & MG_PARBDY)) { + tetraVerticesSeen_ToSend[4*ntTot_in2out+i] = 1; // Vertex seen for the first time + pointCoordInterior_ToSend[3*npInterior_in2out] = p0->c[0]; + pointCoordInterior_ToSend[3*npInterior_in2out+1] = p0->c[1]; + pointCoordInterior_ToSend[3*npInterior_in2out+2] = p0->c[2]; + pointTagInterior_ToSend[npInterior_in2out] = p0->tag; + pointRefInterior_ToSend[npInterior_in2out] = p0->ref; + lsInterior_ToSend[npInterior_in2out] = ls->m[ip]; + npInterior_in2out++; + } + /* If this vertex is tagged MG_PARBDY, store needed info in *PBDY* to + be sent to Pcolor_out. pointIdxPBDY_ToSend stores MG_PARBDY points + except the ones at interface between Pcolor_in and Pcolor_out */ + else { + tetraVerticesSeen_ToSend[4*ntTot_in2out+i] = 0; // Vertex tagged MG_PARBDY (special treatments for those) + pointCoordPBDY_ToSend[3*npPBDY_in2out] = p0->c[0]; + pointCoordPBDY_ToSend[3*npPBDY_in2out+1] = p0->c[1]; + pointCoordPBDY_ToSend[3*npPBDY_in2out+2] = p0->c[2]; + pointTagPBDY_ToSend[npPBDY_in2out] = p0->tag; + pointRefPBDY_ToSend[npPBDY_in2out] = p0->ref; + pointIdxPBDY_ToSend[npPBDY_in2out] = ip; + lsPBDY_ToSend[npPBDY_in2out] = ls->m[ip]; + npPBDY_in2out++; + } + } + /* If this node is on the interface between Pcolor_in and Pcolor_out (p0->flag=-1), + this node will be treated separately, assign 0 in tetraVerticesSeen_ToSend + OR + If this node has been seen before in another tetra (p0->flag = color_out+1), + assign 0 in tetraVerticesSeen_ToSend */ + else { + tetraVerticesSeen_ToSend[4*ntTot_in2out+i] = 0; + } + } + ntTot_in2out++; + /* Remove the tag MG_OVERLAP : here it is used as a way to identify the + tetra to be sent. Later, tetras having the tag MG_OVERLAP will actually + be tetras received from Pcolor_out */ + pt->tag &= ~MG_OVERLAP; + } + + /* The total number of point to send to Pcolor_out npTot_in2out is + the nbr of point not tagged MG_PARBDY npInterior_in2out + plus the ones tagged MG_PARBDY npPBDY_in2out */ + npTot_in2out = npInterior_in2out + npPBDY_in2out; + + /* Reinitialize points flags and tag **/ + for (i=0; i < nitem_ext; i++) { + pos = ext_comm->int_comm_index[i]; + ip = int_comm->intvalues[pos]; + p0 = &mesh->point[ip]; + p0->flag = 0; + if (p0->tag & MG_OVERLAP) p0->tag &=~ MG_OVERLAP; + } + + /** STEP 4 - Special treatment for nodes with tag MG_PARBDY. + Identification of the points at the interface between Pcolor_in + and another Pcolor_ter (!=Pcolor_out) to be sent to Pcolor_out. 
+        Array `dataPBDY_ToSend` (for points with MG_PARBDY) stores
+          - Pcolor_ter: the other partition w/ which this node is shared
+            (Pcolor_ter!=Pcolor_out)
+          - The position of the point in the external communicator
+          - The local index in the mesh
+        NB: This part of the algo might be optimised **/
+
+    /* Allocate the variable dataPBDY_ToSend */
+    PMMG_CALLOC(parmesh,dataPBDY_ToSend,3*npPBDY_in2out*parmesh->nprocs,int,"dataPBDY_ToSend",ier = 0);
+
+    /* Loop over the nodes with MG_PARBDY tags to store useful data to identify
+       them in dataPBDY_ToSend */
+    for (i=0; i<npPBDY_in2out; i++) {
+      ip = pointIdxPBDY_ToSend[i];
+      p0 = &mesh->point[ip];
+
+      /* Loop over the partitions having nodes in common w/ Pcolor_in */
+      for (icomm_ter=0; icomm_ter<next_comm; icomm_ter++) {
+        if (icomm_ter == icomm) continue;
+
+        ext_comm_ter  = &parmesh->ext_node_comm[icomm_ter]; // External node communicator
+        color_ter     = ext_comm_ter->color_out; // Color of the remote partition Pcolor_ter
+        nitem_ext_ter = ext_comm_ter->nitem;     // Nbr of nodes in common between Pcolor_in and Pcolor_ter
+
+        assert( (color_ter != color_out) && "Unexpected case: duplicated communicator?" );
+
+        /* Loop over the nodes in the external node communicator Pcolor_ter */
+        for (j=0; j < nitem_ext_ter; j++) {
+          /* Get the indices of the nodes in the internal communicator */
+          pos    = ext_comm_ter->int_comm_index[j];
+          ip_ter = int_comm->intvalues[pos];
+
+          if ( !(ip==ip_ter) ) continue;
+
+          /* Each time the node ip is found being shared w/ another
+             partition, store Pcolor_ter, the position j of this node
+             in the external comm of Pcolor_ter and the local index of the point */
+          dataPBDY_ToSend[3*ndataPBDY_in2out]   = color_ter;
+          dataPBDY_ToSend[3*ndataPBDY_in2out+1] = j;  // position in external communicator
+          dataPBDY_ToSend[3*ndataPBDY_in2out+2] = ip; // index of point on this partition
+          ndataPBDY_in2out++;
+          break;
+        }
+      }
+    }
+
+    /** STEP 5 - Store all the different sizes to exchange **/
+    n_ToSend[0] = npInterior_in2out; // Nbr of interior points from Pcolor_in to send to Pcolor_out
+    n_ToSend[1] = npPBDY_in2out;     // Nbr of MG_PARBDY points from Pcolor_in to send to Pcolor_out
+    n_ToSend[2] = npTot_in2out;      // Total nbr of points from Pcolor_in to send to Pcolor_out
+    n_ToSend[3] = ndataPBDY_in2out;  // Nbr of data for MG_PARBDY points from Pcolor_in to send to Pcolor_out
+    n_ToSend[4] = np_in;             // Total nbr of points on mesh Pcolor_in
+    n_ToSend[5] = ntTot_in2out;      // Total nbr of tetras from Pcolor_in to send to Pcolor_out
+
+    /** STEP 6 - Send and receive all the data from the other partitions **/
+    // TODO :: Improve number of communications
+    /* STEP 6.1 - First send/receive the different sizes to exchange */
+    MPI_CHECK(
+      MPI_Sendrecv(n_ToSend,6,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   n_ToRecv,6,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   comm,&status),return 0 );
+    local_mpi_tag++;
+
+    npInterior_out2in = n_ToRecv[0];
+    npPBDY_out2in     = n_ToRecv[1];
+    npTot_out2in      = n_ToRecv[2];
+    ndataPBDY_out2in  = n_ToRecv[3];
+    np_out            = n_ToRecv[4];
+    ntTot_out2in      = n_ToRecv[5];
+
+    /* Fill overlap variables */
+    overlap->np_in2out = npTot_in2out; // Total nbr of pts sent from Pcolor_in to Pcolor_out
+    overlap->np_out2in = npTot_out2in; // Total nbr of pts received on Pcolor_in from Pcolor_out
+    overlap->nt_in2out = ntTot_in2out; // Total nbr of tetras sent from Pcolor_in to Pcolor_out
+    overlap->nt_out2in = ntTot_out2in; // Total nbr of tetras received on Pcolor_in from Pcolor_out
+
+    /* STEP 6.2 - Alloc hash table */
+    hash_in2out = overlap->hash_in2out;
+    hash_out2in = overlap->hash_out2in;
+    PMMG_CALLOC(parmesh,hash_in2out,np_in +npTot_out2in+1,int,"hash_in2out",ier = 0);
+
PMMG_CALLOC(parmesh,hash_out2in,np_out +1,int,"hash_out2in",ier = 0); + + /* STEP 6.3 - Send and receive all the other data */ + /* Send/receive indices of tetras vertices */ + PMMG_CALLOC(parmesh,tetraVertices_ToRecv_outIdx,4*ntTot_out2in,int,"tetraVertices_ToRecv_outIdx",ier = 0); + PMMG_CALLOC(parmesh,tetraVertices_ToRecv_inIdx, 4*ntTot_out2in,int,"tetraVertices_ToRecv_inIdx",ier = 0); + MPI_CHECK( + MPI_Sendrecv(tetraVertices_ToSend, 4*ntTot_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + tetraVertices_ToRecv_outIdx,4*ntTot_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),return 0 ); + local_mpi_tag++; + + /* Send/receive flag to know if tetra vertex has already been seen */ + PMMG_CALLOC(parmesh,tetraVerticesSeen_ToRecv,4*ntTot_out2in,int,"tetraVerticesSeen_ToRecv",ier = 0); + MPI_CHECK( + MPI_Sendrecv(tetraVerticesSeen_ToSend,4*ntTot_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + tetraVerticesSeen_ToRecv,4*ntTot_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),return 0 ); + local_mpi_tag++; + + /* Send/receive tetras refs */ + PMMG_CALLOC(parmesh,tetraRef_ToRecv,ntTot_out2in,int,"tetraRef_ToRecv",ier = 0); + MPI_CHECK( + MPI_Sendrecv(tetraRef_ToSend,ntTot_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + tetraRef_ToRecv,ntTot_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),return 0 ); + local_mpi_tag++; + + /* Send/receive points indices */ + PMMG_CALLOC(parmesh,pointIdxInterface_ToRecv,nitem_ext,int,"pointIdxInterface_ToRecv",ier = 0); + MPI_CHECK( + MPI_Sendrecv(pointIdxInterface_ToSend,nitem_ext,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointIdxInterface_ToRecv,nitem_ext,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),return 0 ); + local_mpi_tag++; + + PMMG_CALLOC(parmesh,pointIdxPBDY_ToRecv,npPBDY_out2in,int,"pointIdxPBDY_ToRecv",ier = 0); + MPI_CHECK( + MPI_Sendrecv(pointIdxPBDY_ToSend,npPBDY_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointIdxPBDY_ToRecv,npPBDY_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),return 0 ); + local_mpi_tag++; + + /* Send/receive points tag */ + PMMG_CALLOC(parmesh,pointTagInterior_ToRecv,npInterior_out2in,uint16_t,"pointTagInterior_ToRecv",ier = 0); + MPI_CHECK( + MPI_Sendrecv(pointTagInterior_ToSend,npInterior_in2out,MPI_UINT16_T,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointTagInterior_ToRecv,npInterior_out2in,MPI_UINT16_T,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),return 0 ); + local_mpi_tag++; + + PMMG_CALLOC(parmesh,pointTagPBDY_ToRecv,npPBDY_out2in,uint16_t,"pointTagPBDY_ToRecv",ier = 0); + MPI_CHECK( + MPI_Sendrecv(pointTagPBDY_ToSend,npPBDY_in2out,MPI_UINT16_T,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointTagPBDY_ToRecv,npPBDY_out2in,MPI_UINT16_T,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),return 0 ); + local_mpi_tag++; + + /* Send/receive points references */ + PMMG_CALLOC(parmesh,pointRefInterior_ToRecv,npInterior_out2in,int,"pointRefInterior_ToRecv",ier = 0); + MPI_CHECK( + MPI_Sendrecv(pointRefInterior_ToSend,npInterior_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointRefInterior_ToRecv,npInterior_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),return 0 ); + local_mpi_tag++; + + PMMG_CALLOC(parmesh,pointRefPBDY_ToRecv,npPBDY_out2in,int,"pointRefPBDY_ToRecv",ier = 0); + MPI_CHECK( + MPI_Sendrecv(pointRefPBDY_ToSend,npPBDY_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + 
pointRefPBDY_ToRecv,npPBDY_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   comm,&status),return 0 );
+    local_mpi_tag++;
+
+    /* Send/receive points coordinates */
+    PMMG_CALLOC(parmesh,pointCoordInterior_ToRecv,3*npInterior_out2in,double,"pointCoordInterior_ToRecv",ier = 0);
+    MPI_CHECK(
+      MPI_Sendrecv(pointCoordInterior_ToSend,3*npInterior_in2out,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   pointCoordInterior_ToRecv,3*npInterior_out2in,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   comm,&status),return 0 );
+    local_mpi_tag++;
+
+    PMMG_CALLOC(parmesh,pointCoordPBDY_ToRecv,3*npPBDY_out2in,double,"pointCoordPBDY_ToRecv",ier = 0);
+    MPI_CHECK(
+      MPI_Sendrecv(pointCoordPBDY_ToSend,3*npPBDY_in2out,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   pointCoordPBDY_ToRecv,3*npPBDY_out2in,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   comm,&status),return 0 );
+    local_mpi_tag++;
+
+    /* Send/receive data needed to identify MG_PARBDY nodes */
+    PMMG_CALLOC(parmesh,dataPBDY_ToRecv,3*ndataPBDY_out2in,int,"dataPBDY_ToRecv",ier = 0);
+    MPI_CHECK(
+      MPI_Sendrecv(dataPBDY_ToSend,3*ndataPBDY_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   dataPBDY_ToRecv,3*ndataPBDY_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   comm,&status),return 0 );
+    local_mpi_tag++;
+
+    /* Send/receive LS values */
+    PMMG_CALLOC(parmesh,lsInterior_ToRecv,npInterior_out2in,double,"lsInterior_ToRecv",ier = 0);
+    MPI_CHECK(
+      MPI_Sendrecv(lsInterior_ToSend,npInterior_in2out,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   lsInterior_ToRecv,npInterior_out2in,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   comm,&status),return 0 );
+    local_mpi_tag++;
+
+    PMMG_CALLOC(parmesh,lsPBDY_ToRecv,npPBDY_out2in,double,"lsPBDY_ToRecv",ier = 0);
+    MPI_CHECK(
+      MPI_Sendrecv(lsPBDY_ToSend,npPBDY_in2out,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   lsPBDY_ToRecv,npPBDY_out2in,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag,
+                   comm,&status),return 0 );
+    local_mpi_tag++;
+
+    /** STEP 7 - Fill the overlap hash tables with the interface points **/
+    for (i=0; i < nitem_ext; i++) {
+      ip_out = pointIdxInterface_ToRecv[i]; // Point index on Pcolor_out (the other partition)
+      ip_in  = pointIdxInterface_ToSend[i]; // Point index on Pcolor_in  (this partition)
+      hash_out2in[ip_out] = ip_in;  // From the index on Pcolor_out, find the index on Pcolor_in
+      hash_in2out[ip_in]  = ip_out; // From the index on Pcolor_in,  find the index on Pcolor_out
+    }
+
+    /** STEP 8 - Add the nodes from Pcolor_out having the MG_PARBDY tag to:
+     *    a) the local mesh on Pcolor_in
+     *    b) the overlap hash tables (hash_out2in and hash_in2out)
+     *  Create the variable dataPBDY_AlreadyAdded storing:
+     *    (1) color_out: neighbouring partition
+     *    (2) color_ter: tierce partition w/ which the MG_PARBDY node is shared
+     *    (3) k: position of the node ip in the external communicator Pcolor_out-Pcolor_ter
+     *    (4) ip_in:  local index of the node on Pcolor_in
+     *    (5) ip_out: local index of the node on Pcolor_out
+     *  The steps are as follows:
+     *    8.1. Identify whether duplicated_point = 0 or = 1 based on the data in dataPBDY_AlreadyAdded
+     *    8.2. If duplicated_point = 0, add this new point to the mesh;
+     *         otherwise do nothing
+     *    8.3. For both duplicated_point = 0 and = 1, the hash tables (to find the
+     *         correspondence between indices) are updated with the information of this point
+     *    8.4. For both duplicated_point = 0 and = 1, dataPBDY_AlreadyAdded is
+     *         updated with the information of this point.
+     **/
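Step 8.1 below is a linear scan of the 5-int records accumulated in dataPBDY_AlreadyAdded, with two ways to match: the same interface slot seen from the mirrored (color_ter, color_out) pair, or the same remote index received again from color_out. Condensed into a hypothetical helper for readability; the loop in the diff inlines exactly this logic:

```c
/* Hypothetical helper mirroring STEP 8.1: return the local index ip_in if
 * the remote MG_PARBDY point was already inserted, 0 otherwise.
 * Record layout: {color_out, color_ter, k, ip_in, ip_out}. */
static int overlap_findAdded( const int *reg, int nreg, int color_out,
                              int color_ter, int k, int ip_out ) {
  for ( int r = 0; r < nreg; r++ ) {
    const int *e = &reg[5*r];
    /* Same interface slot, seen from the mirrored pair of colors */
    if ( e[0] == color_ter && e[1] == color_out && e[2] == k ) return e[3];
    /* Same remote point received again from color_out */
    if ( e[0] == color_out && e[4] == ip_out ) return e[3];
  }
  return 0;
}
```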
+    mesh->xpmax = MG_MAX( (MMG5_int)(1.5*mesh->xp),mesh->npmax);
+
+    /* Loop over the points having the MG_PARBDY tag to add them to the mesh on Pcolor_in */
+    for (i=0; i < npPBDY_out2in; i++) {
+      ip_out   = pointIdxPBDY_ToRecv[i]; // Index of the point on Pcolor_out
+      coord[0] = pointCoordPBDY_ToRecv[3*i];
+      coord[1] = pointCoordPBDY_ToRecv[3*i+1];
+      coord[2] = pointCoordPBDY_ToRecv[3*i+2];
+      tag      = pointTagPBDY_ToRecv[i] | MG_OVERLAP; // Add the tag MG_OVERLAP to this point
+      ref      = pointRefPBDY_ToRecv[i];
+      ls_val   = lsPBDY_ToRecv[i];
+
+      /* Loop over the array dataPBDY_ToRecv to find the point ip_out */
+      for (j=0; j < ndataPBDY_out2in; j++) {
+
+        color_ter = dataPBDY_ToRecv[3*j];
+        k         = dataPBDY_ToRecv[3*j+1]; // Position in the external communicator Pcolor_out-Pcolor_ter
+        ip        = dataPBDY_ToRecv[3*j+2]; // Index of the point on Pcolor_out
+
+        if ( !(ip == ip_out) ) continue;
+
+        /* STEP 8.1 - Search dataPBDY_AlreadyAdded to know whether this point has
+           already been added to the mesh:
+             - if this point already exists, then duplicated_point = 1;
+             - otherwise duplicated_point = 0 */
+        duplicated_point = 0;
+        for (r=0; r < ndataPBDY_added+1; r++) {
+          /* If the tuple (Pcolor_out-Pcolor_ter) is the right one */
+          if ( (color_ter == dataPBDY_AlreadyAdded[5*r]) && (color_out == dataPBDY_AlreadyAdded[5*r+1]) ) {
+            /* And the point is at the same position in the external comm */
+            if ( k == dataPBDY_AlreadyAdded[5*r+2] ) {
+              /* then this point has already been added */
+              ip_in = dataPBDY_AlreadyAdded[5*r+3];
+              duplicated_point = 1;
+              break;
+            }
+          }
+          /* If this node ip_out from Pcolor_out has already been added:
+             get the index ip_in it has on this partition Pcolor_in */
+          if ( (color_out == dataPBDY_AlreadyAdded[5*r]) && (ip_out == dataPBDY_AlreadyAdded[5*r+4])) {
+            ip_in = dataPBDY_AlreadyAdded[5*r+3];
+            duplicated_point = 1;
+            break;
+          }
+        }
+
+        /* STEP 8.2 - If this point has not been added yet (duplicated_point = 0),
+           then add it to the mesh; otherwise do nothing */
+        if (duplicated_point==0) {
+          ip_in = MMG3D_newPt(mesh,coord,tag,1);
+          mesh->point[ip_in].ref = ref; // Add ref
+          mesh->point[ip_in].xp  = 0;   // Assign 0 to xp
+          ls->m[ip_in] = ls_val;
+        }
+
+        /* STEP 8.3 - Update the hash tables to know the correspondence of the
+           local indices on color_in and color_out */
+        hash_out2in[ip_out] = ip_in;  // From the index on color_out, find the index on color_in
+        hash_in2out[ip_in]  = ip_out; // From the index on color_in,  find the index on color_out
+
+        /* STEP 8.4 - Update dataPBDY_AlreadyAdded, necessary to identify which
+           MG_PARBDY points have already been treated */
+        dataPBDY_AlreadyAdded[5*ndataPBDY_added]   = color_out;
+        dataPBDY_AlreadyAdded[5*ndataPBDY_added+1] = color_ter;
+        dataPBDY_AlreadyAdded[5*ndataPBDY_added+2] = k;
+        dataPBDY_AlreadyAdded[5*ndataPBDY_added+3] = ip_in;
+        dataPBDY_AlreadyAdded[5*ndataPBDY_added+4] = ip_out;
+        ndataPBDY_added++;
+      }
+    }
+
+    /** STEP 9 - Add all the other nodes from Pcolor_out to:
+     *    a) the local mesh on Pcolor_in
+     *    b) the overlap hash tables (hash_out2in and hash_in2out) **/
+    j=0;
+
+    for (i=0; i < 4*ntTot_out2in; i++) {
+      ip_out=tetraVertices_ToRecv_outIdx[i]; // This point has the index ip_out on Pcolor_out
+
+      if (tetraVerticesSeen_ToRecv[i]==1) {
+        coord[0] = pointCoordInterior_ToRecv[3*j];
+        coord[1] = pointCoordInterior_ToRecv[3*j+1];
+        coord[2] = pointCoordInterior_ToRecv[3*j+2];
+        ref      = pointRefInterior_ToRecv[j];
+        tag      = pointTagInterior_ToRecv[j] | MG_OVERLAP; // Add the tag MG_OVERLAP to this point
+        ls_val   = lsInterior_ToRecv[j];
+        j += 1;
+
+        /* New overlapping node is
created. src is set equal to 1 by default, + but the value does not really matter because this point will then be + deleted by PMMG_delete_overlap once the overlap is not needed anymore */ + ip_in = MMG3D_newPt(mesh,coord,tag,1); + mesh->point[ip_in].ref = ref; // Assign ref + mesh->point[ip_in].xp = 0; // Assign 0 to xp + ls->m[ip_in] = ls_val; + hash_out2in[ip_out] = ip_in; // From index on color_out, I found index on color_in + hash_in2out[ip_in] = ip_out; // From index on color_in, I found index on color_out + tetraVertices_ToRecv_inIdx[i]=ip_in; // This point has the index ip_in on Pcolor_in + } + else{ + tetraVertices_ToRecv_inIdx[i]=hash_out2in[ip_out]; // Find the local index of this point from hash table + } + } + + /** STEP 10 - Add the tetra to the mesh */ + for (i=0; i < ntTot_out2in; i++) { + /* Create a new tetra*/ + k = MMG3D_newElt(mesh); + if ( !k ) { + MMG3D_TETRA_REALLOC(mesh,k,mesh->gap, + fprintf(stderr,"\n ## Error: %s: unable to allocate" + " a new element.\n",__func__); + MMG5_INCREASE_MEM_MESSAGE(); + fprintf(stderr," Exit program.\n"); + return 0); + } + pt = &mesh->tetra[k]; + + /* Add vertices index, ref and tag to this new tetra */ + pt->v[0] = tetraVertices_ToRecv_inIdx[4*i]; + pt->v[1] = tetraVertices_ToRecv_inIdx[4*i+1]; + pt->v[2] = tetraVertices_ToRecv_inIdx[4*i+2]; + pt->v[3] = tetraVertices_ToRecv_inIdx[4*i+3]; + pt->ref = tetraRef_ToRecv[i]; + pt->tag |= MG_OVERLAP; + } + + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) + fprintf(stdout, " OVERLAP - part %d sends %d pts and %d tetra to part %d\n", + color_in,npTot_in2out,ntTot_in2out,color_out); + + /* Reset memory*/ + memset(n_ToSend,0x00,6*sizeof(int)); + memset(n_ToRecv,0x00,6*sizeof(int)); + + memset(pointCoordInterior_ToSend,0x00,3*npInterior_in2out*sizeof(double)); + PMMG_DEL_MEM(parmesh,pointCoordInterior_ToRecv,double,"pointCoordInterior_ToRecv"); + + memset(pointCoordPBDY_ToSend,0x00,3*npPBDY_in2out*sizeof(double)); + PMMG_DEL_MEM(parmesh,pointCoordPBDY_ToRecv,double,"pointCoordPBDY_ToRecv"); + + memset(pointTagInterior_ToSend,0x00,npInterior_in2out*sizeof(uint16_t)); + PMMG_DEL_MEM(parmesh,pointTagInterior_ToRecv,uint16_t,"pointTagInterior_ToRecv"); + + memset(pointTagPBDY_ToSend,0x00,npPBDY_in2out*sizeof(uint16_t)); + PMMG_DEL_MEM(parmesh,pointTagPBDY_ToRecv,uint16_t,"pointTagPBDY_ToRecv"); + + memset(pointRefInterior_ToSend,0x00,npInterior_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,pointRefInterior_ToRecv,int,"pointRefInterior_ToRecv"); + + memset(pointRefPBDY_ToSend,0x00,npPBDY_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,pointRefPBDY_ToRecv,int,"pointRefPBDY_ToRecv"); + + memset(pointIdxInterface_ToSend,0x00,nitem_ext*sizeof(int)); + PMMG_DEL_MEM(parmesh,pointIdxInterface_ToRecv,int,"pointIdxInterface_ToRecv"); + + memset(pointIdxPBDY_ToSend,0x00,npPBDY_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,pointIdxPBDY_ToRecv,int,"pointIdxPBDY_ToRecv"); + + memset(tetraVertices_ToSend,0x00,4*ntTot_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,tetraVertices_ToRecv_inIdx, int,"tetraVertices_ToRecv_inIdx"); + PMMG_DEL_MEM(parmesh,tetraVertices_ToRecv_outIdx,int,"tetraVertices_ToRecv_outIdx"); + + memset(tetraVerticesSeen_ToSend,0x00,4*ntTot_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,tetraVerticesSeen_ToRecv,int,"tetraVerticesSeen_ToRecv"); + + memset(tetraRef_ToSend,0x00,ntTot_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,tetraRef_ToRecv,int,"tetraRef_ToRecv"); + + memset(lsInterior_ToSend,0x00,npInterior_in2out*sizeof(double)); + PMMG_DEL_MEM(parmesh,lsInterior_ToRecv,double,"lsInterior_ToRecv"); + + 
memset(lsPBDY_ToSend,0x00,npPBDY_in2out*sizeof(double)); + PMMG_DEL_MEM(parmesh,lsPBDY_ToRecv,double,"lsPBDY_ToRecv"); + + PMMG_DEL_MEM(parmesh,dataPBDY_ToSend,int,"dataPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,dataPBDY_ToRecv,int,"dataPBDY_ToRecv"); + } + + /* Deallocate memory*/ + PMMG_DEL_MEM(parmesh,n_ToSend,int,"n_ToSend"); + PMMG_DEL_MEM(parmesh,n_ToRecv,int,"n_ToRecv"); + PMMG_DEL_MEM(parmesh,pointCoordInterior_ToSend,double,"pointCoordInterior_ToSend"); + PMMG_DEL_MEM(parmesh,pointCoordPBDY_ToSend,double,"pointCoordPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,pointTagInterior_ToSend,uint16_t,"pointTagInterior_ToSend"); + PMMG_DEL_MEM(parmesh,pointTagPBDY_ToSend,uint16_t,"pointTagPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,pointRefInterior_ToSend,int,"pointRefInterior_ToSend"); + PMMG_DEL_MEM(parmesh,pointRefPBDY_ToSend,int,"pointRefPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,pointIdxInterface_ToSend,int,"pointIdxInterface_ToSend"); + PMMG_DEL_MEM(parmesh,pointIdxPBDY_ToSend,int,"pointIdxPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,tetraVertices_ToSend,int,"tetraVertices_ToSend"); + PMMG_DEL_MEM(parmesh,tetraVerticesSeen_ToSend,int,"tetraVerticesSeen_ToSend"); + PMMG_DEL_MEM(parmesh,tetraRef_ToSend,int,"tetraRef_ToSend"); + PMMG_DEL_MEM(parmesh,lsInterior_ToSend,double,"lsInterior_ToSend"); + PMMG_DEL_MEM(parmesh,lsPBDY_ToSend,double,"lsPBDY_ToSend"); + + PMMG_DEL_MEM(parmesh,dataPBDY_AlreadyAdded,int,"dataPBDY_AlreadyAdded"); + PMMG_DEL_MEM(parmesh,int_comm->intvalues,int,"intvalues"); + + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) + fprintf(stdout, " OVERLAP - part %d has %d pts and %d tetras after overlap creation\n", + color_in,mesh->np,mesh->ne); + + return 1; +} + +/** + * \param parmesh pointer toward a parmesh structure + * \param comm MPI communicator for ParMmg + * + * \return 1 if success, 0 if fail. 
+ * + * Delete the overlap points and tetras present in the mesh + * + */ +int PMMG_delete_overlap(PMMG_pParMesh parmesh, MPI_Comm comm) { + + /* Local variables */ + MMG5_pMesh mesh; + MMG5_pTetra pt; + MMG5_pPoint ppt; + + int i; + + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { + fprintf(stdout,"\n ## Delete Overlap.\n"); + } + + /* Delete overlap works only when there is one group */ + /* Ensure only one group on each proc */ + assert ( (parmesh->ngrp == 1 || parmesh->ngrp == 0) + && "Overlap not implemented for more than 1 group per rank"); + + /* Global initialization */ + mesh = parmesh->listgrp[0].mesh; + + /* Step 1 - Delete tetras with tag MG_OVERLAP */ + for (i=mesh->ne; i > 0; i--) { + pt = &mesh->tetra[i]; + if ( !(pt->tag & MG_OVERLAP) ) continue; + if ( !MMG3D_delElt(mesh,i) ) return 0; + } + + /* Step 2 - Delete points with tag MG_OVERLAP */ + for (i=mesh->np; i > 0; i--) { + ppt = &mesh->point[i]; + if ( !(ppt->tag & MG_OVERLAP) ) continue; + MMG3D_delPt(mesh,i); + } + + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) + fprintf(stdout, " OVERLAP - part %d has %d pts and %d tetras after overlap deletion\n", + parmesh->myrank,mesh->np,mesh->ne); + + return 1; +} diff --git a/src/parmmg.h b/src/parmmg.h index 440faa03..3388c85b 100644 --- a/src/parmmg.h +++ b/src/parmmg.h @@ -455,6 +455,10 @@ int PMMG_hashPar( MMG5_pMesh mesh,MMG5_HGeom *pHash ); int PMMG_hashPar_pmmg( PMMG_pParMesh parmesh,MMG5_HGeom *pHash ); int PMMG_hashOldPar_pmmg( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_Hash *hash ); +/* Overlap functions */ +int PMMG_create_overlap(PMMG_pParMesh parmesh,MPI_Comm comm); +int PMMG_delete_overlap(PMMG_pParMesh parmesh,MPI_Comm comm); + /* Isovalue discretization functions */ int PMMG_ls(PMMG_pParMesh parmesh); int PMMG_cuttet_ls(PMMG_pParMesh parmesh); diff --git a/src/quality_pmmg.c b/src/quality_pmmg.c index 737eb1e3..8bf4373a 100644 --- a/src/quality_pmmg.c +++ b/src/quality_pmmg.c @@ -393,7 +393,7 @@ int PMMG_computePrilen( PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_pSol met, do double len; int i,k,ia,np,nq,n; int ref; - int16_t tag; + uint16_t tag; int8_t i0,i1,ier; static double bd[9]= {0.0, 0.3, 0.6, 0.7071, 0.9, 1.3, 1.4142, 2.0, 5.0}; diff --git a/src/tag_pmmg.c b/src/tag_pmmg.c index a49fed2b..e811ec4d 100644 --- a/src/tag_pmmg.c +++ b/src/tag_pmmg.c @@ -68,8 +68,8 @@ void PMMG_tag_par_edge(MMG5_pxTetra pxt,int j){ * Tag an edge as parallel. */ int PMMG_tag_par_edge_hash(MMG5_pTetra pt,MMG5_HGeom hash,int ia){ - int ip0,ip1,getref; - int16_t gettag; + int ip0,ip1,getref; + uint16_t gettag; ip0 = pt->v[MMG5_iare[ia][0]]; ip1 = pt->v[MMG5_iare[ia][1]]; @@ -275,7 +275,7 @@ int PMMG_updateTag(PMMG_pParMesh parmesh) { MMG5_HGeom hash; int *node2int_node_comm0_index1,*face2int_face_comm0_index1; int grpid,iel,ifac,ia,ip0,ip1,k,j,i,getref; - int16_t gettag; + uint16_t gettag; int8_t isbdy; /* Loop on groups */
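Taken together, the new entry points declared in parmmg.h form a strict bracket around the level-set snapping step, exactly as wired into PMMG_ls earlier in this patch: the overlap exists only while PMMG_snpval_ls needs ghost data from the neighbouring partitions. A minimal usage sketch (hypothetical wrapper name; error handling shortened to the diff's own pattern):

```c
/* Minimal sketch of the overlap lifecycle introduced by this patch. */
int snap_with_overlap( PMMG_pParMesh parmesh, MMG5_pMesh mesh, MMG5_pSol sol ) {
  MPI_Comm comm = parmesh->info.read_comm;

  /* Grow one layer of points/tetras in from every neighbour partition */
  if ( !PMMG_create_overlap(parmesh,comm) ) return 0;

  /* Snap level-set values; the ghost entities are visible here */
  int ier = PMMG_snpval_ls(parmesh,mesh,sol);

  /* Remove everything tagged MG_OVERLAP again */
  if ( !PMMG_delete_overlap(parmesh,comm) ) return 0;

  return ier;
}
```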