diff --git a/.gitmodules b/.gitmodules index c57c102c55a7..09d8dfc2af95 100644 --- a/.gitmodules +++ b/.gitmodules @@ -12,3 +12,6 @@ [submodule "externals/meson"] path = externals/meson url = https://github.com/mesonbuild/meson +[submodule "subprojects/pyBeam"] + path = subprojects/pyBeam + url = https://github.com/pyBeam/pyBeam diff --git a/SU2_CFD/src/drivers/CDiscAdjSinglezoneDriver.cpp b/SU2_CFD/src/drivers/CDiscAdjSinglezoneDriver.cpp index 495876b2c6af..187a0ebc962b 100644 --- a/SU2_CFD/src/drivers/CDiscAdjSinglezoneDriver.cpp +++ b/SU2_CFD/src/drivers/CDiscAdjSinglezoneDriver.cpp @@ -144,11 +144,7 @@ void CDiscAdjSinglezoneDriver::Preprocess(unsigned long TimeIter) { *--- respect to the conservative variables. Since these derivatives do not change in the steady state case *--- we only have to record if the current recording is different from the main variables. ---*/ - if (RecordingState != MainVariables){ - - MainRecording(); - - } + MainRecording(); } @@ -536,14 +532,17 @@ void CDiscAdjSinglezoneDriver::Print_DirectResidual(unsigned short kind_recordin void CDiscAdjSinglezoneDriver::MainRecording(){ - /*--- SetRecording stores the computational graph on one iteration of the direct problem. Calling it with NONE - * as argument ensures that all information from a previous recording is removed. ---*/ + if (RecordingState != MainVariables){ - SetRecording(NONE); + /*--- SetRecording stores the computational graph on one iteration of the direct problem. Calling it with NONE + * as argument ensures that all information from a previous recording is removed. ---*/ - /*--- Store the computational graph of one direct iteration with the conservative variables as input. ---*/ + SetRecording(NONE); - SetRecording(MainVariables); + /*--- Store the computational graph of one direct iteration with the conservative variables as input. ---*/ + + SetRecording(MainVariables); + } } diff --git a/SU2_CFD/src/solvers/CMeshSolver.cpp b/SU2_CFD/src/solvers/CMeshSolver.cpp index c124fa66cfa2..256016ac682f 100644 --- a/SU2_CFD/src/solvers/CMeshSolver.cpp +++ b/SU2_CFD/src/solvers/CMeshSolver.cpp @@ -645,45 +645,26 @@ void CMeshSolver::SetBoundaryDisplacements(CGeometry *geometry, CNumerics *numer unsigned short iMarker; - /*--- Impose zero displacements of all non-moving surfaces (also at nodes in multiple moving/non-moving boundaries). ---*/ - /*--- Exceptions: symmetry plane, the receive boundaries and periodic boundaries should get a different treatment. ---*/ + /*--- Impose zero displacements of all non-moving surfaces that are not MARKER_DEFORM_SYM_PLANE. ---*/ for (iMarker = 0; iMarker < config->GetnMarker_All(); iMarker++) { if ((config->GetMarker_All_Deform_Mesh(iMarker) == NO) && (config->GetMarker_All_Deform_Mesh_Sym_Plane(iMarker) == NO) && (config->GetMarker_All_Moving(iMarker) == NO) && - (config->GetMarker_All_KindBC(iMarker) != SYMMETRY_PLANE) && - (config->GetMarker_All_KindBC(iMarker) != SEND_RECEIVE) && - (config->GetMarker_All_KindBC(iMarker) != PERIODIC_BOUNDARY)) { + (config->GetMarker_All_KindBC(iMarker) != INTERNAL_BOUNDARY) && + (config->GetMarker_All_KindBC(iMarker) != SEND_RECEIVE)) { BC_Clamped(geometry, numerics, config, iMarker); } } - /*--- Symmetry plane is clamped, for now. 
---*/ - for (iMarker = 0; iMarker < config->GetnMarker_All(); iMarker++) { - if ((config->GetMarker_All_Deform_Mesh(iMarker) == NO) && - (config->GetMarker_All_Deform_Mesh_Sym_Plane(iMarker) == NO) && - (config->GetMarker_All_Moving(iMarker) == NO) && - (config->GetMarker_All_KindBC(iMarker) == SYMMETRY_PLANE)) { - - BC_Clamped(geometry, numerics, config, iMarker); - } - } - - - /*--- Impose displacement boundary conditions. ---*/ + /*--- Impose displacement boundary conditions and symmetry. ---*/ for (iMarker = 0; iMarker < config->GetnMarker_All(); iMarker++) { if ((config->GetMarker_All_Deform_Mesh(iMarker) == YES) || (config->GetMarker_All_Moving(iMarker) == YES)) { BC_Deforming(geometry, numerics, config, iMarker); } - } - - - /*--- Symmetry deform plane is not clamped ---*/ - for (iMarker = 0; iMarker < config->GetnMarker_All(); iMarker++) { - if (config->GetMarker_All_Deform_Mesh_Sym_Plane(iMarker) == YES) { + else if (config->GetMarker_All_Deform_Mesh_Sym_Plane(iMarker) == YES) { BC_Sym_Plane(geometry, numerics, config, iMarker); } diff --git a/SU2_PY/FSI_tools/FSI_config.py b/SU2_PY/FSI_tools/FSI_config.py index 9bc2bf9dd999..d801619c51a8 100644 --- a/SU2_PY/FSI_tools/FSI_config.py +++ b/SU2_PY/FSI_tools/FSI_config.py @@ -5,22 +5,12 @@ # \authors Nicola Fonzi, Vittorio Cavalieri based on the work of David Thomas # \version 7.0.8 "Blackbird" # -# The current SU2 release has been coordinated by the -# SU2 International Developers Society -# with selected contributions from the open-source community. +# SU2 Project Website: https://su2code.github.io # -# The main research teams contributing to the current release are: -# - Prof. Juan J. Alonso's group at Stanford University. -# - Prof. Piero Colonna's group at Delft University of Technology. -# - Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology. -# - Prof. Alberto Guardone's group at Polytechnic University of Milan. -# - Prof. Rafael Palacios' group at Imperial College London. -# - Prof. Vincent Terrapon's group at the University of Liege. -# - Prof. Edwin van der Weide's group at the University of Twente. -# - Lab. of New Concepts in Aeronautics at Tech. Institute of Aeronautics. +# The SU2 Project is maintained by the SU2 Foundation +# (http://su2foundation.org) # -# Copyright 2012-2020, Francisco D. Palacios, Thomas D. Economon, -# Tim Albring, and the SU2 contributors. +# Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md) # # SU2 is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public diff --git a/SU2_PY/SU2_FSI/AdjointInterface.py b/SU2_PY/SU2_FSI/AdjointInterface.py new file mode 100644 index 000000000000..f2e5722bbd62 --- /dev/null +++ b/SU2_PY/SU2_FSI/AdjointInterface.py @@ -0,0 +1,860 @@ +#!/usr/bin/env python + +## \file AdjointInterface.py +# \brief Interface class that handles adjoint FSI synchronisation and communication. +# \author Ruben Sanchez +# \version 7.0.8 "Blackbird" +# +# SU2 Project Website: https://su2code.github.io +# +# The SU2 Project is maintained by the SU2 Foundation +# (http://su2foundation.org) +# +# Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md) +# +# SU2 is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+#
+# SU2 is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
+
+# ----------------------------------------------------------------------
+# Imports
+# ----------------------------------------------------------------------
+
+import numpy as np
+import shutil
+
+# ----------------------------------------------------------------------
+# FSI Interface Class
+# ----------------------------------------------------------------------
+
+
+class AdjointInterface:
+
+    """
+    FSI interface class that handles fluid/solid solver synchronisation and communication
+    """
+
+    def __init__(self, FSI_config, FluidSolver, SolidSolver, MLS_Solver, have_MPI):
+        """
+        Class constructor. Declare some variables and do some screen outputs.
+        """
+
+        if have_MPI:
+            from mpi4py import MPI
+            self.MPI = MPI
+            self.comm = MPI.COMM_WORLD       # MPI World communicator
+            self.have_MPI = True
+            myid = self.comm.Get_rank()
+        else:
+            self.comm = 0
+            self.have_MPI = False
+            myid = 0
+
+        self.rootProcess = 0                 # the root process is chosen to be MPI rank = 0
+
+        self.nDim = FSI_config['NDIM']       # problem dimension
+
+        self.haveFluidSolver = False         # True if the fluid solver is initialized on the current rank
+        self.haveSolidSolver = False         # True if the solid solver is initialized on the current rank
+        self.haveFluidInterface = False      # True if the current rank owns at least one fluid interface node
+        self.haveSolidInterface = False      # True if the current rank owns at least one solid interface node
+
+        self.fluidSolverProcessors = list()      # list of partitions where the fluid solver is initialized
+        self.solidSolverProcessors = list()      # list of partitions where the solid solver is initialized
+        self.fluidInterfaceProcessors = list()   # list of partitions where there are fluid interface nodes
+        self.solidInterfaceProcessors = list()   # list of partitions where there are solid interface nodes
+
+        self.fluidInterfaceIdentifier = None     # object that can identify the f/s interface within the fluid solver
+        self.solidInterfaceIdentifier = None     # object that can identify the f/s interface within the solid solver
+
+        self.fluidGlobalIndexRange = {}      # contains the global FSI indexing of each fluid interface node for all partitions
+        self.solidGlobalIndexRange = {}      # contains the global FSI indexing of each solid interface node for all partitions
+
+        self.FluidHaloNodeList = {}          # contains the indices (fluid solver indexing) of the halo nodes for each partition
+        self.fluidIndexing = {}              # links between the fluid solver indexing and the FSI indexing for the interface nodes
+        self.SolidHaloNodeList = {}          # contains the indices (solid solver indexing) of the halo nodes for each partition
+        self.solidIndexing = {}              # links between the solid solver indexing and the FSI indexing for the interface nodes
+
+        self.nLocalFluidInterfaceNodes = 0            # number of nodes (halo nodes included) on the fluid interface, on each partition
+        self.nLocalFluidInterfaceHaloNode = 0         # number of halo nodes on the fluid interface, on each partition
+        self.nLocalFluidInterfacePhysicalNodes = 0    # number of physical (= non halo) nodes on the fluid interface, on each partition
+        self.nFluidInterfaceNodes = 0                 # number of nodes on the fluid interface, sum over all the partitions
+
self.nFluidInterfacePhysicalNodes = 0 # number of physical nodes on the fluid interface, sum over all partitions + + self.nLocalSolidInterfaceNodes = 0 # number of physical nodes on the solid interface, on each partition + self.nLocalSolidInterfaceHaloNode = 0 # number of halo nodes on the solid intrface, on each partition + self.nLocalSolidInterfacePhysicalNodes = 0 # number of physical (= non halo) nodes on the solid interface, on each partition + self.nSolidInterfaceNodes = 0 # number of nodes on the solid interface, sum over all partitions + self.nSolidInterfacePhysicalNodes = 0 # number of physical nodes on the solid interface, sum over all partitions + + self.localFluidInterface_vertex_indices = None + + self.globalFluidInterfaceXcoor = None + self.globalFluidInterfaceYcoor = None + self.globalFluidInterfaceZcoor = None + + self.globalFluidCoordinates = None + self.globalSolidCoordinates = None + + self.sendCounts = None + self.globalFluidDispX = None + self.globalFluidDispY = None + self.globalFluidDispZ = None + + self.globalFluidLoadSensX = None + self.globalFluidLoadSensY = None + self.globalFluidLoadSensZ = None + + self.globalSolidDispX = None + self.globalSolidDispY = None + self.globalSolidDispZ = None + + self.globalSolidLoadSensX = None + self.globalSolidLoadSensY = None + self.globalSolidLoadSensZ = None + + self.globalFluidLoadX = None + self.globalFluidLoadY = None + self.globalFluidLoadZ = None + + self.globalDispFlowAdjointX = None + self.globalDispFlowAdjointY = None + self.globalDispFlowAdjointZ = None + + self.globalSolidLoadX = None + self.globalSolidLoadY = None + self.globalSolidLoadZ = None + + self.globalDispSolidAdjointX = None + self.globalDispSolidAdjointY = None + self.globalDispSolidAdjointZ = None + + self.globalDispSolidAdjointXOld = None + self.globalDispSolidAdjointYOld = None + self.globalDispSolidAdjointZOld = None + + self.haloNodesPositionsInit = {} # initial position of the halo nodes (fluid side only) + + self.solidInterface_array_DispX = None # solid interface displacement + self.solidInterface_array_DispY = None + self.solidInterface_array_DispZ = None + + self.solidInterfaceResidual_array_X = None # solid interface position residual + self.solidInterfaceResidual_array_Y = None + self.solidInterfaceResidual_array_Z = None + + self.solidInterfaceResidualnM1_array_X = None # solid interface position residual at the previous BGS iteration + self.solidInterfaceResidualnM1_array_Y = None + self.solidInterfaceResidualnM1_array_Z = None + + self.fluidInterface_array_DispX = None # fluid interface displacement + self.fluidInterface_array_DispY = None + self.fluidInterface_array_DispZ = None + + self.fluidLoads_array_X = None # loads on the fluid side of the f/s interface + self.fluidLoads_array_Y = None + self.fluidLoads_array_Z = None + + self.solidLoads_array_X = None # loads on the solid side of the f/s interface + self.solidLoads_array_Y = None + self.solidLoads_array_Z = None + + self.FSIIter = 0 # current FSI iteration + self.unsteady = False # flag for steady or unsteady simulation (default is steady) + + # ---Some screen output --- + self.MPIPrint('Fluid solver : SU2_CFD') + self.MPIPrint('Solid solver : pyBeam') + self.MPIPrint('Steady coupled simulation') + self.MPIPrint('Matching fluid-solid interface using Moving Least Squares method') + self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER'])) + self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE'])) + self.MPIPrint('Static under-relaxation 
with constant parameter {}'.format(FSI_config['RELAX_PARAM'])) + self.MPIPrint('FSI interface is set') + + def MPIPrint(self, message): + """ + Print a message on screen only from the master process. + """ + + if self.have_MPI: + myid = self.comm.Get_rank() + else: + myid = 0 + + if myid == self.rootProcess: + print(message) + + def MPIBarrier(self): + """ + Perform a synchronization barrier in case of parallel run with MPI. + """ + + if self.have_MPI: + self.comm.barrier() + + def checkMPI(self): + """ + Return the MPI characteristics of the problem + """ + + if self.have_MPI: + myid = self.comm.Get_rank() + MPIsize = self.comm.Get_size() + else: + myid = 0 + MPIsize = 1 + + return myid, MPIsize + + def connect(self, FSI_config, FluidSolver, SolidSolver): + """ + Connection between solvers. + Creates the communication support between the two solvers. + Gets information about f/s interfaces from the two solvers. + """ + + # Recover the process and the size of the parallelization + myid, MPIsize = self.checkMPI() + + ######################################################################################## + # --- Identify the fluid interface and store the number of nodes for each partition ---# + ######################################################################################## + self.fluidInterfaceIdentifier = None + self.nLocalFluidInterfaceNodes = 0 + if FluidSolver != None: + print('Fluid solver is initialized on process {}'.format(myid)) + self.haveFluidSolver = True + allInterfaceMarkersTags = FluidSolver.GetAllDeformMeshMarkersTag() + allMarkersID = FluidSolver.GetAllBoundaryMarkers() + if not allInterfaceMarkersTags: + raise Exception('No moving marker was defined in SU2.') + else: + if allInterfaceMarkersTags[0] in allMarkersID.keys(): + self.fluidInterfaceIdentifier = allMarkersID[allInterfaceMarkersTags[0]] + if self.fluidInterfaceIdentifier != None: + self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier) + if self.nLocalFluidInterfaceNodes != 0: + self.haveFluidInterface = True + print('Number of interface fluid nodes (halo nodes included) on proccess {} and marker {}: {}'\ + .format(myid,allInterfaceMarkersTags[0],self.nLocalFluidInterfaceNodes)) + else: + pass + + ##################################################################################### + # --- Identify the solid interface and store the number of nodes (single core) ---# + ##################################################################################### + if SolidSolver != None: + print('Solid solver is initialized on process {}'.format(myid)) + self.haveSolidSolver = True + self.nSolidInterfaceNodes = SolidSolver.nPoint + self.nSolidInterfacePhysicalNodes = SolidSolver.nPoint + self.nLocalSolidInterfaceNodes = SolidSolver.nPoint + self.globalSolidCoordinates = np.zeros((SolidSolver.nPoint, 3)) + for iPoint in range(0, SolidSolver.nPoint): + coordX, coordY, coordZ = SolidSolver.GetInitialCoordinates(iPoint) + self.globalSolidCoordinates[iPoint, 0] = coordX + self.globalSolidCoordinates[iPoint, 1] = coordY + self.globalSolidCoordinates[iPoint, 2] = coordZ + else: + pass + + ################################################# + # --- Exchange information about processors --- # + ################################################# + if self.have_MPI: + if self.haveFluidSolver: + sendBufFluid = np.array(int(1)) + else: + sendBufFluid = np.array(int(0)) + if self.haveFluidInterface: + sendBufFluidInterface = np.array(int(1)) + else: + sendBufFluidInterface = np.array(int(0)) 
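+            # The 0/1 flags are exchanged with Allgather below so that every rank
+            # builds identical lists of the fluid-solver and fluid-interface processors.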
+ rcvBufFluid = np.zeros(MPIsize, dtype=int) + rcvBufFluidInterface = np.zeros(MPIsize, dtype=int) + self.comm.Allgather(sendBufFluid, rcvBufFluid) + self.comm.Allgather(sendBufFluidInterface, rcvBufFluidInterface) + + for iProc in range(MPIsize): + if rcvBufFluid[iProc] == 1: + self.fluidSolverProcessors.append(iProc) + if rcvBufFluidInterface[iProc] == 1: + self.fluidInterfaceProcessors.append(iProc) + + del sendBufFluid, rcvBufFluid, sendBufFluidInterface, rcvBufFluidInterface + else: + self.fluidSolverProcessors.append(0) + self.fluidInterfaceProcessors.append(0) + + self.MPIBarrier() + + # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) + # --- Calculate the number of halo nodes on each partition + self.nLocalFluidInterfaceHaloNode = 0 + for iVertex in range(self.nLocalFluidInterfaceNodes): + if FluidSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True: + GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex) + self.FluidHaloNodeList[GlobalIndex] = iVertex + self.nLocalFluidInterfaceHaloNode += 1 + + # Calculate the number of physical (= not halo) nodes on each partition + self.nLocalFluidInterfacePhysicalNodes = self.nLocalFluidInterfaceNodes - self.nLocalFluidInterfaceHaloNode + if self.have_MPI == True: + self.FluidHaloNodeList = self.comm.allgather(self.FluidHaloNodeList) + else: + self.FluidHaloNodeList = [{}] + + # --- Calculate the total number of nodes (with and without halo) at the fluid interface (sum over all the partitions) and broadcast the number accross all processors --- + sendBuffHalo = np.array(int(self.nLocalFluidInterfaceNodes)) + sendBuffPhysical = np.array(int(self.nLocalFluidInterfacePhysicalNodes)) + rcvBuffHalo = np.zeros(1, dtype=int) + rcvBuffPhysical = np.zeros(1, dtype=int) + if self.have_MPI: + self.comm.barrier() + self.comm.Allreduce(sendBuffHalo, rcvBuffHalo, op=self.MPI.SUM) + self.comm.Allreduce(sendBuffPhysical, rcvBuffPhysical, op=self.MPI.SUM) + self.nFluidInterfaceNodes = rcvBuffHalo[0] + self.nFluidInterfacePhysicalNodes = rcvBuffPhysical[0] + else: + self.nFluidInterfaceNodes = np.copy(sendBuffHalo) + self.nFluidInterfacePhysicalNodes = np.copy(sendBuffPhysical) + del sendBuffHalo, rcvBuffHalo, sendBuffPhysical, rcvBuffPhysical + + # --- Store the number of physical interface nodes on each processor and allgather the information --- + self.fluidPhysicalInterfaceNodesDistribution = np.zeros(MPIsize, dtype=int) + if self.have_MPI: + sendBuffPhysical = np.array(int(self.nLocalFluidInterfacePhysicalNodes)) + self.comm.Allgather(sendBuffPhysical, self.fluidPhysicalInterfaceNodesDistribution) + del sendBuffPhysical + else: + self.fluidPhysicalInterfaceNodesDistribution[0] = self.nFluidInterfacePhysicalNodes + + # --- Calculate and store the global indexing of interface physical nodes on each processor and allgather the information --- + if self.have_MPI: + if myid in self.fluidInterfaceProcessors: + globalIndexStart = 0 + for iProc in range(myid): + globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc] + globalIndexStop = globalIndexStart + self.nLocalFluidInterfacePhysicalNodes - 1 + else: + globalIndexStart = 0 + globalIndexStop = 0 + self.fluidGlobalIndexRange[myid] = [globalIndexStart, globalIndexStop] + self.fluidGlobalIndexRange = self.comm.allgather(self.fluidGlobalIndexRange) + else: + temp = {} + temp[0] = [0, self.nLocalFluidInterfacePhysicalNodes - 1] + self.fluidGlobalIndexRange = list() + self.fluidGlobalIndexRange.append(temp) + 
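The index-range block above hands each rank a contiguous slice of the global interface numbering, offset by the physical-node counts of all lower ranks. A minimal standalone sketch of that scheme (hypothetical helper name, plain Python, not part of the patch; the class stores [0, 0] for ranks without interface nodes, while the sketch marks them with stop < start):

```python
def global_index_ranges(physical_nodes_per_rank):
    """Inclusive [start, stop] global index range per rank."""
    ranges, start = [], 0
    for n in physical_nodes_per_rank:
        ranges.append([start, start + n - 1])  # mirrors globalIndexStart/globalIndexStop above
        start += n
    return ranges

# Three ranks owning 4, 0 and 3 physical interface nodes:
print(global_index_ranges([4, 0, 3]))  # [[0, 3], [4, 3], [4, 6]]
```

A second sketch, of the conservative traction transfer performed later in transferFluidTractions: mapping loads with the transpose of the MLS interpolation matrix preserves the total force whenever the rows of the matrix sum to one (H below is a hypothetical 3x2 example, not pyBeam's actual matrix):

```python
import numpy as np

H = np.array([[1.0, 0.0],   # fluid node 0 follows solid node 0
              [0.5, 0.5],   # fluid node 1 averages the two solid nodes
              [0.0, 1.0]])  # fluid node 2 follows solid node 1
f_fluid = np.array([1.0, 2.0, 3.0])              # fluid interface loads
f_solid = H.T.dot(f_fluid)                       # transposed transfer, as in STEP 2
assert np.isclose(f_fluid.sum(), f_solid.sum())  # total force is preserved
```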
+ + self.MPIPrint( + 'Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes)) + self.MPIPrint( + 'Total number of physical fluid interface nodes : {}'.format(self.nFluidInterfacePhysicalNodes)) + self.MPIPrint( + 'Total number of beam interface nodes : {}'.format(self.nSolidInterfaceNodes)) + self.MPIPrint('Total number of fluid interface nodes : {}'.format(self.nFluidInterfacePhysicalNodes)) + self.MPIPrint('Total number of solid interface nodes : {}'.format(self.nSolidInterfacePhysicalNodes)) + + self.MPIBarrier() + + # --- Get, for the fluid interface on each partition: + # --- The vertex indices, which are stored locally on each processor + # --- The coordinates X, Y and Z, which are stored in a global list in processor 0 + GlobalIndex = int() + localIndex = 0 + fluidIndexing_temp = {} + self.localFluidInterface_vertex_indices = np.zeros(self.nLocalFluidInterfacePhysicalNodes, dtype=int) + localFluidInterface_array_X_init = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidInterface_array_Y_init = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidInterface_array_Z_init = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + + for iVertex in range(self.nLocalFluidInterfaceNodes): + GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex) + posx, posy, posz = FluidSolver.GetVertex_UndeformedCoord(self.fluidInterfaceIdentifier, iVertex) + + if GlobalIndex in self.FluidHaloNodeList[myid].keys(): + self.haloNodesPositionsInit[GlobalIndex] = (posx, posy, posz) + else: + fluidIndexing_temp[GlobalIndex] = self.__getGlobalIndex('fluid', myid, localIndex) + localFluidInterface_array_X_init[localIndex] = posx + localFluidInterface_array_Y_init[localIndex] = posy + localFluidInterface_array_Z_init[localIndex] = posz + self.localFluidInterface_vertex_indices[localIndex] = int(iVertex) + localIndex += 1 + + if self.have_MPI: + fluidIndexing_temp = self.comm.allgather(fluidIndexing_temp) + for ii in range(len(fluidIndexing_temp)): + for key, value in fluidIndexing_temp[ii].items(): + self.fluidIndexing[key] = value + + # --- Collect local array sizes using gather in root process + bufXCoor = np.array(localFluidInterface_array_X_init) + bufYCoor = np.array(localFluidInterface_array_Y_init) + bufZCoor = np.array(localFluidInterface_array_Z_init) + self.sendCounts = np.array(self.comm.gather(self.nLocalFluidInterfacePhysicalNodes, 0)) + + if myid == self.rootProcess: + print("sendCounts: {}, total: {}".format(self.sendCounts, sum(self.sendCounts))) + self.globalFluidInterfaceXcoor = np.empty(sum(self.sendCounts)) + self.globalFluidInterfaceYcoor = np.empty(sum(self.sendCounts)) + self.globalFluidInterfaceZcoor = np.empty(sum(self.sendCounts)) + + self.comm.Gatherv(sendbuf=bufXCoor, recvbuf=(self.globalFluidInterfaceXcoor, self.sendCounts), root=0) + self.comm.Gatherv(sendbuf=bufYCoor, recvbuf=(self.globalFluidInterfaceYcoor, self.sendCounts), root=0) + self.comm.Gatherv(sendbuf=bufZCoor, recvbuf=(self.globalFluidInterfaceZcoor, self.sendCounts), root=0) + + else: + self.fluidIndexing = fluidIndexing_temp.copy() + self.globalFluidInterfaceXcoor = localFluidInterface_array_X_init.copy() + self.globalFluidInterfaceYcoor = localFluidInterface_array_Y_init.copy() + self.globalFluidInterfaceZcoor = localFluidInterface_array_Z_init.copy() + + # Store the global fluid coordinates + if myid == self.rootProcess: + self.globalFluidCoordinates = np.zeros((self.nFluidInterfacePhysicalNodes, 3)) + for i in range(0, 
self.nFluidInterfacePhysicalNodes): + self.globalFluidCoordinates[i][0] = self.globalFluidInterfaceXcoor[i] + self.globalFluidCoordinates[i][1] = self.globalFluidInterfaceYcoor[i] + self.globalFluidCoordinates[i][2] = self.globalFluidInterfaceZcoor[i] + + del fluidIndexing_temp, localFluidInterface_array_X_init, \ + localFluidInterface_array_Y_init, localFluidInterface_array_Z_init + + ################################################################################################### + # Initialize the local load array + # The initial displacements are set to 0 (this might be changed when we have restart capabilities) + ################################################################################################### + + self.globalFluidLoadX = np.zeros(self.nFluidInterfacePhysicalNodes) + self.globalFluidLoadY = np.zeros(self.nFluidInterfacePhysicalNodes) + self.globalFluidLoadZ = np.zeros(self.nFluidInterfacePhysicalNodes) + + if self.haveSolidSolver: + + self.globalSolidLoadX = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidLoadY = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidLoadZ = np.zeros(self.nSolidInterfaceNodes) + + self.globalSolidDispX = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidDispY = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidDispZ = np.zeros(self.nSolidInterfaceNodes) + + self.globalSolidLoadSensX = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidLoadSensY = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidLoadSensZ = np.zeros(self.nSolidInterfaceNodes) + + self.globalDispSolidAdjointX = np.zeros(self.nSolidInterfaceNodes) + self.globalDispSolidAdjointY = np.zeros(self.nSolidInterfaceNodes) + self.globalDispSolidAdjointZ = np.zeros(self.nSolidInterfaceNodes) + + def transferFluidTractions(self, FluidSolver, SolidSolver, MLSSolver): + """ + Transfer fluid tractions. + Gathers the fluid tractions from the interface into the root process. + Interpolates the tractions using the transposed matrix. + Applies the tractions into the solid solver. 
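+        The transposed-matrix interpolation keeps the transfer (approximately)
+        conservative; the total force on both sides is compared in STEP 3 below.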
+ """ + + # Recover the process and the size of the parallelization + myid, MPIsize = self.checkMPI() + + ################################################################################################################ + # --- STEP 1: Retrieve the fluid loads + # --- Get, for the fluid interface on each partition: + # --- The vertex indices, which are stored locally on each processor + # --- The coordinates X, Y and Z, which are stored in a global list in processor 0 + ################################################################################################################ + + # Initialize the local load array + localFluidLoadX = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidLoadY = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidLoadZ = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + + localIndex = 0 + # For the vertices that belong to the interface + for iVertex in self.localFluidInterface_vertex_indices: + # Compute the vertex forces on the fluid solver + loadX, loadY, loadZ = FluidSolver.GetFlowLoad(self.fluidInterfaceIdentifier, int(iVertex)) + # Store them in the local load array + localFluidLoadX[localIndex] = loadX + localFluidLoadY[localIndex] = loadY + localFluidLoadZ[localIndex] = loadZ + #print(iVertex, localFluidLoadX[localIndex], localFluidLoadY[localIndex], localFluidLoadZ[localIndex]) + localIndex += 1 + + if self.have_MPI: + + # Store the local loads in buffers in the form of numpy arrays + bufXLoad = np.array(localFluidLoadX) + bufYLoad = np.array(localFluidLoadY) + bufZLoad = np.array(localFluidLoadZ) + + # Initialize the global load array + if myid == self.rootProcess: + self.globalFluidLoadX = np.empty(sum(self.sendCounts)) + self.globalFluidLoadY = np.empty(sum(self.sendCounts)) + self.globalFluidLoadZ = np.empty(sum(self.sendCounts)) + + # Gatherv using self.sendCounts maintains the ordering of the coordinates + self.comm.Gatherv(sendbuf=bufXLoad, recvbuf=(self.globalFluidLoadX, self.sendCounts), root=0) + self.comm.Gatherv(sendbuf=bufYLoad, recvbuf=(self.globalFluidLoadY, self.sendCounts), root=0) + self.comm.Gatherv(sendbuf=bufZLoad, recvbuf=(self.globalFluidLoadZ, self.sendCounts), root=0) + + else: + self.globalFluidLoadX = localFluidLoadX.copy() + self.globalFluidLoadY = localFluidLoadY.copy() + self.globalFluidLoadZ = localFluidLoadZ.copy() + + # Delete local variables + del localFluidLoadX, localFluidLoadY, localFluidLoadZ + + ################################################################################################################ + # --- STEP 2: Interpolate + ################################################################################################################ + + # ---> Input: self.globalFluidLoadX, self.globalFluidLoadY, self.globalFluidLoadZ + + if myid == self.rootProcess: + self.globalSolidLoadX = MLSSolver.interpolation_matrix.transpose().dot(self.globalFluidLoadX) + self.globalSolidLoadY = MLSSolver.interpolation_matrix.transpose().dot(self.globalFluidLoadY) + self.globalSolidLoadZ = MLSSolver.interpolation_matrix.transpose().dot(self.globalFluidLoadZ) + + # ---> Output: self.globalSolidLoadX, self.globalSolidLoadY, self.globalSolidLoadZ + + ################################################################################################################ + # --- STEP 3: Check conservation + ################################################################################################################ + + # --- Check for total force conservation after interpolation + if myid == self.rootProcess: + + 
+            # Total loads before interpolation, fluid side
+            FFX = self.globalFluidLoadX.sum()
+            FFY = self.globalFluidLoadY.sum()
+            FFZ = self.globalFluidLoadZ.sum()
+
+            # Total loads after interpolation, solid side
+            FX = self.globalSolidLoadX.sum()
+            FY = self.globalSolidLoadY.sum()
+            FZ = self.globalSolidLoadZ.sum()
+
+            self.MPIPrint("Checking f/s interface total force...")
+            self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ))
+            self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ))
+
+        ################################################################################################################
+        # --- STEP 4: Transfer to the structural solver
+        # --- pyBeam runs in single core, so there is no need to deal with the parallelization
+        ################################################################################################################
+
+        if self.haveSolidSolver:
+            # For the vertices that belong to the interface
+            for iVertex in range(0, self.nSolidInterfaceNodes):
+                # Store them in the solid solver directly
+                SolidSolver.SetLoads(iVertex, self.globalSolidLoadX[iVertex],
+                                     self.globalSolidLoadY[iVertex],
+                                     self.globalSolidLoadZ[iVertex])
+
+    def transferDisplacementAdjoint_SourceTerm(self, FSIConfig, FluidSolver, SolidSolver, MLSSolver):
+        """
+        Transfer the displacement adjoints.
+        Gathers the mesh-displacement sensitivities of the fluid adjoint into the root process.
+        Interpolates them onto the solid nodes using the transposed matrix.
+        Applies the relaxed result as a source term in the solid adjoint solver.
+        """
+
+        # Recover the process and the size of the parallelization
+        myid, MPIsize = self.checkMPI()
+
+        ################################################################################################################
+        # --- STEP 0: Store old displacement adjoints for relaxation
+        ################################################################################################################
+
+        if self.haveSolidSolver:
+
+            # Store the old displacement adjoints
+            self.globalDispSolidAdjointXOld = self.globalDispSolidAdjointX.copy()
+            self.globalDispSolidAdjointYOld = self.globalDispSolidAdjointY.copy()
+            self.globalDispSolidAdjointZOld = self.globalDispSolidAdjointZ.copy()
+
+            # Initialize the local displacement adjoint arrays
+            localDispSolidAdjointX = np.zeros(self.nSolidInterfaceNodes)
+            localDispSolidAdjointY = np.zeros(self.nSolidInterfaceNodes)
+            localDispSolidAdjointZ = np.zeros(self.nSolidInterfaceNodes)
+
+        ################################################################################################################
+        # --- STEP 1: Retrieve the mesh-displacement sensitivities from the fluid adjoint
+        # --- Get, for the fluid interface on each partition:
+        # --- The vertex indices, which are stored locally on each processor
+        # --- The sensitivities, which are gathered in a global list in processor 0
+        ################################################################################################################
+
+        # Initialize the local sensitivity arrays
+        localDispFlowAdjointX = np.zeros(self.nLocalFluidInterfacePhysicalNodes)
+        localDispFlowAdjointY = np.zeros(self.nLocalFluidInterfacePhysicalNodes)
+        localDispFlowAdjointZ = np.zeros(self.nLocalFluidInterfacePhysicalNodes)
+
+        localIndex = 0
+        # For the vertices that belong to the interface
+        for iVertex in self.localFluidInterface_vertex_indices:
+            # Compute the vertex forces on the fluid solver
+            FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, int(iVertex))
+            # Recover the sensitivities of the objective with respect to the mesh displacements
+            sensX, sensY, sensZ = FluidSolver.GetMeshDisp_Sensitivity(self.fluidInterfaceIdentifier, int(iVertex))
+            # Store them in the local sensitivity array
+            localDispFlowAdjointX[localIndex] = sensX
+            localDispFlowAdjointY[localIndex] = sensY
+            localDispFlowAdjointZ[localIndex] = sensZ
+            localIndex += 1
+
+        if self.have_MPI:
+
+            # Store the local sensitivities in buffers in the form of numpy arrays
+            bufXLoad = np.array(localDispFlowAdjointX)
+            bufYLoad = np.array(localDispFlowAdjointY)
+            bufZLoad = np.array(localDispFlowAdjointZ)
+
+            # Initialize the global sensitivity arrays
+            if myid == self.rootProcess:
+                self.globalDispFlowAdjointX = np.empty(sum(self.sendCounts))
+                self.globalDispFlowAdjointY = np.empty(sum(self.sendCounts))
+                self.globalDispFlowAdjointZ = np.empty(sum(self.sendCounts))
+
+            # Gatherv using self.sendCounts maintains the ordering of the coordinates
+            self.comm.Gatherv(sendbuf=bufXLoad, recvbuf=(self.globalDispFlowAdjointX, self.sendCounts), root=0)
+            self.comm.Gatherv(sendbuf=bufYLoad, recvbuf=(self.globalDispFlowAdjointY, self.sendCounts), root=0)
+            self.comm.Gatherv(sendbuf=bufZLoad, recvbuf=(self.globalDispFlowAdjointZ, self.sendCounts), root=0)
+
+        else:
+            self.globalDispFlowAdjointX = localDispFlowAdjointX.copy()
+            self.globalDispFlowAdjointY = localDispFlowAdjointY.copy()
+            self.globalDispFlowAdjointZ = localDispFlowAdjointZ.copy()
+
+        ################################################################################################################
+        # --- STEP 2: Interpolate
+        ################################################################################################################
+
+        # ---> Input: self.globalDispFlowAdjointX, self.globalDispFlowAdjointY, self.globalDispFlowAdjointZ
+
+        if myid == self.rootProcess:
+            self.globalDispSolidAdjointX = MLSSolver.interpolation_matrix.transpose().dot(self.globalDispFlowAdjointX)
+            self.globalDispSolidAdjointY = MLSSolver.interpolation_matrix.transpose().dot(self.globalDispFlowAdjointY)
+            self.globalDispSolidAdjointZ = MLSSolver.interpolation_matrix.transpose().dot(self.globalDispFlowAdjointZ)
+
+        # ---> Output: self.globalDispSolidAdjointX, self.globalDispSolidAdjointY, self.globalDispSolidAdjointZ
+
+        ################################################################################################################
+        # --- STEP 3: Transfer to the structural solver
+        # --- pyBeam runs in single core, so there is no need to deal with the parallelization
+        ################################################################################################################
+
+        if self.haveSolidSolver:
+
+            # Recover the relaxation parameter
+            relaxParam = float(FSIConfig["RELAX_PARAM"])
+
+            # For the vertices that belong to the interface
+            for iVertex in range(0, self.nSolidInterfaceNodes):
+                # Relax the displacement adjoint with the value from the previous BGS iteration
+                localDispSolidAdjointX[iVertex] = relaxParam * self.globalDispSolidAdjointX[iVertex] + \
+                                                  (1.0 - relaxParam) * self.globalDispSolidAdjointXOld[iVertex]
+                localDispSolidAdjointY[iVertex] = relaxParam * self.globalDispSolidAdjointY[iVertex] + \
+                                                  (1.0 - relaxParam) * self.globalDispSolidAdjointYOld[iVertex]
+                localDispSolidAdjointZ[iVertex] = relaxParam * self.globalDispSolidAdjointZ[iVertex] + \
+                                                  (1.0 - relaxParam) * self.globalDispSolidAdjointZOld[iVertex]
+                # Store them in the solid solver directly
+                SolidSolver.SetDisplacementAdjoint(iVertex, localDispSolidAdjointX[iVertex],
+                                                   localDispSolidAdjointY[iVertex],
+                                                   localDispSolidAdjointZ[iVertex])
+
+        # Delete local variables
+        del localDispFlowAdjointX, localDispFlowAdjointY, localDispFlowAdjointZ
+
+        if self.haveSolidSolver:
+            del localDispSolidAdjointX, localDispSolidAdjointY, localDispSolidAdjointZ
+
+    def transferFlowLoadAdjoint(self, FSIConfig, FluidSolver, SolidSolver, MLSSolver):
+        """
+        Transfer the adjoints of the flow loads.
+        Retrieves the load adjoints from the solid solver.
+        Interpolates them onto the fluid nodes using the (non-transposed) interpolation matrix.
+        Applies them by scattering into the correct positions for the fluid solver.
+        """
+
+        # Recover the process and the size of the parallelization
+        myid, MPIsize = self.checkMPI()
+
+        ################################################################################################################
+        # --- STEP 1: Retrieve the load adjoints from pyBeam
+        # --- pyBeam runs in single core, so there is no need to deal with the parallelization
+        ################################################################################################################
+
+        if self.haveSolidSolver:
+
+            # For the vertices that belong to the interface
+            for iVertex in range(0, self.nSolidInterfaceNodes):
+
+                # Store the load adjoints in the global array directly
+                sensX, sensY, sensZ = SolidSolver.GetLoadAdjoint(iVertex)
+                self.globalSolidLoadSensX[iVertex] = sensX
+                self.globalSolidLoadSensY[iVertex] = sensY
+                self.globalSolidLoadSensZ[iVertex] = sensZ
+
+        ################################################################################################################
+        # --- STEP 2: Interpolate
+        ################################################################################################################
+
+        # ---> Input: self.globalSolidLoadSensX, self.globalSolidLoadSensY, self.globalSolidLoadSensZ
+
+        if myid == self.rootProcess:
+
+            self.globalFluidLoadSensX = MLSSolver.interpolation_matrix.dot(self.globalSolidLoadSensX)
+            self.globalFluidLoadSensY = MLSSolver.interpolation_matrix.dot(self.globalSolidLoadSensY)
+            self.globalFluidLoadSensZ = MLSSolver.interpolation_matrix.dot(self.globalSolidLoadSensZ)
+
+        # ---> Output: self.globalFluidLoadSensX, self.globalFluidLoadSensY, self.globalFluidLoadSensZ
+
+        ################################################################################################################
+        # --- STEP 3: Transfer to the fluid solver
+        ################################################################################################################
+
+        # --- Recover them from the interpolated vectors
+        localFluidLoadSensX = np.zeros(self.nLocalFluidInterfacePhysicalNodes)
+        localFluidLoadSensY = np.zeros(self.nLocalFluidInterfacePhysicalNodes)
+        localFluidLoadSensZ = np.zeros(self.nLocalFluidInterfacePhysicalNodes)
+
+        if self.have_MPI:
+
+            self.comm.Scatterv(sendbuf=(self.globalFluidLoadSensX, self.sendCounts), recvbuf=localFluidLoadSensX, root=0)
+            self.comm.Scatterv(sendbuf=(self.globalFluidLoadSensY, self.sendCounts), recvbuf=localFluidLoadSensY, root=0)
+            self.comm.Scatterv(sendbuf=(self.globalFluidLoadSensZ, self.sendCounts), recvbuf=localFluidLoadSensZ, root=0)
+
+        else:
+            localFluidLoadSensX = self.globalFluidLoadSensX.copy()
+            localFluidLoadSensY = self.globalFluidLoadSensY.copy()
+            localFluidLoadSensZ = self.globalFluidLoadSensZ.copy()
+
+        # For the vertices that belong to the interface
+        localIndex = 0
+        for iVertex in self.localFluidInterface_vertex_indices:
+            # Apply them to the flow load adjoint of the fluid solver
+            FluidSolver.SetFlowLoad_Adjoint(self.fluidInterfaceIdentifier, int(iVertex),
+                                            localFluidLoadSensX[localIndex],
+                                            localFluidLoadSensY[localIndex],
+                                            localFluidLoadSensZ[localIndex])
+            # Increment the local index
+            localIndex += 1
+
+        # Delete local variables
+        del localFluidLoadSensX, localFluidLoadSensY, localFluidLoadSensZ
+
+    def SteadyFSI(self, FSIconfig, FluidSolver, SolidSolver, MLSSolver):
+        """
+        Runs the steady adjoint FSI computation.
+        Synchronizes the fluid and solid solvers with data exchange at the f/s interface.
+        """
+
+        # Recover the process and the size of the parallelization
+        myid, MPIsize = self.checkMPI()
+
+        # --- Set some general variables for the steady computation --- #
+        nFSIIter = FSIconfig['NB_FSI_ITER']    # maximum number of FSI iterations (for each time step)
+
+        self.MPIPrint('\n****************************************')
+        self.MPIPrint('* Begin steady adjoint FSI computation *')
+        self.MPIPrint('****************************************\n')
+
+        self.MPIPrint('\n*********** Enter Block Gauss Seidel (BGS) method for strong coupling adjoint FSI ************')
+
+        # --- External FSI loop --- #
+        self.FSIIter = 0
+
+        # Preprocess only done once at the beginning
+        FluidSolver.Preprocess(0)    # Time iteration pre-processing
+
+        # For the number of iterations allowed
+        while self.FSIIter < nFSIIter:
+
+            self.MPIPrint("\n>>>> Adjoint FSI iteration {} <<<<".format(self.FSIIter))
+
+            if self.FSIIter > 0:
+                # --- Flow load adjoint interpolation and communication ---#
+                self.MPIPrint('\n##### Transferring fluid traction adjoint\n')
+                self.MPIBarrier()
+                self.transferFlowLoadAdjoint(FSIconfig, FluidSolver, SolidSolver, MLSSolver)
+
+            # --- Fluid solver call for FSI subiteration --- #
+            self.MPIPrint('\n##### Launching fluid solver for a steady computation\n')
+            self.MPIBarrier()
+            FluidSolver.ResetConvergence()    # Make sure the solver starts convergence from 0
+            FluidSolver.MainRecording()       # Run the recording of the main variables
+            FluidSolver.Run()                 # Run one time-step (static: one simulation)
+            FluidSolver.Postprocess()         # Run the recording of the geometrical variables
+            FluidSolver.Update()              # Update the solver for the next time iteration
+            FluidSolver.Monitor(0)            # Monitor the solver and output solution to file if required
+            FluidSolver.Output(0)             # Output the solution to file
+
+            # --- Surface fluid loads interpolation and communication ---#
+            self.MPIPrint('\n##### Transferring fluid tractions to the beam solver\n')
+            self.MPIBarrier()
+            self.transferFluidTractions(FluidSolver, SolidSolver, MLSSolver)
+
+            # --- Displacement adjoint interpolation and communication ---#
+            self.MPIPrint('\n##### Transferring displacement adjoint to the beam solver\n')
+            self.MPIBarrier()
+            self.transferDisplacementAdjoint_SourceTerm(FSIconfig, FluidSolver, SolidSolver, MLSSolver)
+
+            # --- Solid solver call for FSI subiteration --- #
+
+            self.MPIBarrier()
+            if self.haveSolidSolver:
+                self.MPIPrint('\n##### Recording the pyBeam solution process\n')
+                SolidSolver.RecordSolver()
+                self.MPIPrint('\n##### Running the adjoint\n')
+                SolidSolver.RunAdjoint()
+
+            self.FSIIter += 1
+
+        self.MPIBarrier()
+
+        self.MPIPrint('\nBGS iterations completed (strong coupling)')
+        self.MPIPrint(' ')
+        self.MPIPrint('*************************')
+        self.MPIPrint('*  End FSI computation  *')
+        self.MPIPrint('*************************')
+        self.MPIPrint(' ')
+
+    def __getGlobalIndex(self, physics, iProc, iLocalVertex):
+        """
+        Calculate the global indexing of interface nodes across all the partitions. This does not include halo nodes.
+        """
+
+        if physics == 'fluid':
+            globalStartIndex = self.fluidGlobalIndexRange[iProc][iProc][0]
+        elif physics == 'solid':
+            globalStartIndex = self.solidGlobalIndexRange[iProc][iProc][0]
+
+        globalIndex = globalStartIndex + iLocalVertex
+
+        return globalIndex
diff --git a/SU2_PY/SU2_FSI/FSI_config.py b/SU2_PY/SU2_FSI/FSI_config.py
new file mode 100644
index 000000000000..5266ea1e0357
--- /dev/null
+++ b/SU2_PY/SU2_FSI/FSI_config.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+
+## \file FSI_config.py
+# \brief Python class for handling configuration file for FSI computation.
+# \author David Thomas, Rocco Bombardieri
+# \version 7.0.8 "Blackbird"
+#
+# SU2 Project Website: https://su2code.github.io
+#
+# The SU2 Project is maintained by the SU2 Foundation
+# (http://su2foundation.org)
+#
+# Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md)
+#
+# SU2 is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# SU2 is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
+
+# ----------------------------------------------------------------------
+# Imports
+# ----------------------------------------------------------------------
+
+from .switch import switch
+
+# ----------------------------------------------------------------------
+# FSI Configuration Class
+# ----------------------------------------------------------------------
+
+class FSIConfig:
+    """
+    Class that contains all the parameters coming from the FSI configuration file.
+    Read the file and store all the options into a dictionary.
+ """ + + def __init__(self, FileName): + self.ConfigFileName = FileName + self._ConfigContent = {} + self.readConfig() + + def __str__(self): + tempString = str() + for key, value in self._ConfigContent.items(): + tempString += "{} = {}\n".format(key, value) + return tempString + + def __getitem__(self, key): + return self._ConfigContent[key] + + def __setitem__(self, key, value): + self._ConfigContent[key] = value + + def readConfig(self): + input_file = open(self.ConfigFileName) + while 1: + line = input_file.readline() + if not line: + break + # remove line returns + line = line.strip('\r\n') + # make sure it has useful data + if (not "=" in line) or (line[0] == '%'): + continue + # split across equal sign + line = line.split("=", 1) + this_param = line[0].strip() + this_value = line[1].strip() + + for case in switch(this_param): + # integer values + if case("RESTART_ITER"): pass + if case("NDIM"): pass + if case("NB_EXT_ITER"): pass + if case("NB_FSI_ITER"): + self._ConfigContent[this_param] = int(this_value) + break + + # float values + if case("FSI_TOLERANCE"): + self._ConfigContent[this_param] = float(this_value) + break + if case("RELAX_PARAM"): + self._ConfigContent[this_param] = float(this_value) + break + + # string values + if case("SU2_CONFIG"): pass + if case("PYBEAM_CONFIG"): pass + if case("MLS_CONFIG_FILE_NAME"): pass + if case("INTERNAL_FLOW"): + self._ConfigContent[this_param] = this_value + break + + if case(): + print(this_param + " is an invalid option !") + # end for + + # def dump() diff --git a/SU2_PY/SU2_FSI/PrimalInterface.py b/SU2_PY/SU2_FSI/PrimalInterface.py new file mode 100644 index 000000000000..6dd9c62fc036 --- /dev/null +++ b/SU2_PY/SU2_FSI/PrimalInterface.py @@ -0,0 +1,764 @@ +#!/usr/bin/env python + +## \file Interface.py +# \brief Interface class that handles fluid/solid solvers synchronisation and communication. +# \author Rocco Bombardieri and Ruben Sanchez based on previous work by David Thomas. +# \version 7.0.8 "Blackbird" +# +# SU2 Project Website: https://su2code.github.io +# +# The SU2 Project is maintained by the SU2 Foundation +# (http://su2foundation.org) +# +# Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md) +# +# SU2 is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# SU2 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with SU2. If not, see . + +# ---------------------------------------------------------------------- +# Imports +# ---------------------------------------------------------------------- + + +import numpy as np +import shutil +import os +# ---------------------------------------------------------------------- +# FSI Interface Class +# ---------------------------------------------------------------------- + + +class Interface: + """ + FSI interface class that handles fluid/solid solvers synchronisation and communication + """ + + def __init__(self, FSI_config, FluidSolver, SolidSolver, MLS_Solver, have_MPI): + """ + Class constructor. Declare some variables and do some screen outputs. 
+ """ + + if have_MPI: + from mpi4py import MPI + self.MPI = MPI + self.comm = MPI.COMM_WORLD # MPI World communicator + self.have_MPI = True + myid = self.comm.Get_rank() + else: + self.comm = 0 + self.have_MPI = False + myid = 0 + + self.rootProcess = 0 #the root process is chosen to be MPI rank = 0 + + self.nDim = FSI_config['NDIM'] #problem dimension + + self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank + self.haveSolidSolver = False #True if the solid solver is initialized on the current rank + self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node + self.haveSolidInterface = False #True if the current rank owns at least one solid interface node + + self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized + self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized + self.fluidInterfaceProcessors = list() #list of partitions where there are fluid interface nodes + self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes + + self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver + self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver + + self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions + self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions + + self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition + self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes + self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition + self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes + + self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition + self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition + self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition + self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions + self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions + + self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition + self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition + self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition + self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions + self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions + + self.localFluidInterface_vertex_indices = None + + self.globalFluidInterfaceXcoor = None + self.globalFluidInterfaceYcoor = None + self.globalFluidInterfaceZcoor = None + + self.globalFluidCoordinates = None + self.globalSolidCoordinates = None + + self.sendCounts = None + self.globalFluidDispX = None + self.globalFluidDispY = None + self.globalFluidDispZ = None + + self.globalSolidDispX = 
None + self.globalSolidDispY = None + self.globalSolidDispZ = None + + self.globalSolidDispXOld = None + self.globalSolidDispYOld = None + self.globalSolidDispZOld = None + + self.globalFluidLoadX = None + self.globalFluidLoadY = None + self.globalFluidLoadZ = None + + self.globalSolidLoadX = None + self.globalSolidLoadY = None + self.globalSolidLoadZ = None + + self.haloNodesPositionsInit = {} # initial position of the halo nodes (fluid side only) + + self.solidInterface_array_DispX = None # solid interface displacement + self.solidInterface_array_DispY = None + self.solidInterface_array_DispZ = None + + self.solidInterfaceResidual_array_X = None #solid interface position residual + self.solidInterfaceResidual_array_Y = None + self.solidInterfaceResidual_array_Z = None + + self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration + self.solidInterfaceResidualnM1_array_Y = None + self.solidInterfaceResidualnM1_array_Z = None + + self.fluidInterface_array_DispX = None #fluid interface displacement + self.fluidInterface_array_DispY = None + self.fluidInterface_array_DispZ = None + + self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface + self.fluidLoads_array_Y = None + self.fluidLoads_array_Z = None + + self.solidLoads_array_X = None #loads on the solid side of the f/s interface + self.solidLoads_array_Y = None + self.solidLoads_array_Z = None + + self.FSIIter = 0 # current FSI iteration + self.unsteady = False # flag for steady or unsteady simulation (default is steady) + + # ---Some screen output --- + self.MPIPrint('Fluid solver : SU2_CFD') + self.MPIPrint('Solid solver : pyBeam') + self.MPIPrint('Steady coupled simulation') + self.MPIPrint('Matching fluid-solid interface using Moving Least Squares method') + self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER'])) + self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE'])) + self.MPIPrint('Static under-relaxation with constant parameter {}'.format(FSI_config['RELAX_PARAM'])) + self.MPIPrint('FSI interface is set') + + def MPIPrint(self, message): + """ + Print a message on screen only from the master process. + """ + + if self.have_MPI: + myid = self.comm.Get_rank() + else: + myid = 0 + + if myid == self.rootProcess: + print(message) + + def MPIBarrier(self): + """ + Perform a synchronization barrier in case of parallel run with MPI. + """ + + if self.have_MPI: + self.comm.barrier() + + def checkMPI(self): + """ + Return the MPI characteristics of the problem + """ + + if self.have_MPI: + myid = self.comm.Get_rank() + MPIsize = self.comm.Get_size() + else: + myid = 0 + MPIsize = 1 + + return myid, MPIsize + + def connect(self, FSI_config, FluidSolver, SolidSolver): + """ + Connection between solvers. + Creates the communication support between the two solvers. + Gets information about f/s interfaces from the two solvers. 
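+        The gathering logic is the same as in AdjointInterface.connect.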
+ """ + + # Recover the process and the size of the parallelization + myid, MPIsize = self.checkMPI() + + # --- Identify the fluid interface and store the number of nodes for each partition ---# + self.fluidInterfaceIdentifier = None + self.nLocalFluidInterfaceNodes = 0 + if FluidSolver != None: + print('Fluid solver is initialized on process {}'.format(myid)) + self.haveFluidSolver = True + allInterfaceMarkersTags = FluidSolver.GetAllDeformMeshMarkersTag() + allMarkersID = FluidSolver.GetAllBoundaryMarkers() + if not allInterfaceMarkersTags: + raise Exception('No moving marker was defined in SU2.') + else: + if allInterfaceMarkersTags[0] in allMarkersID.keys(): + self.fluidInterfaceIdentifier = allMarkersID[allInterfaceMarkersTags[0]] + if self.fluidInterfaceIdentifier != None: + self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier) + if self.nLocalFluidInterfaceNodes != 0: + self.haveFluidInterface = True + print('Number of interface fluid nodes (halo nodes included) on proccess {} and marker {}: {}'\ + .format(myid,allInterfaceMarkersTags[0],self.nLocalFluidInterfaceNodes)) + else: + pass + + # --- Identify the solid interface and store the number of nodes (single core) ---# + if SolidSolver != None: + print('Solid solver is initialized on process {}'.format(myid)) + self.haveSolidSolver = True + self.nSolidInterfaceNodes = SolidSolver.nPoint + self.nSolidInterfacePhysicalNodes = SolidSolver.nPoint + self.nLocalSolidInterfaceNodes = SolidSolver.nPoint + self.globalSolidCoordinates = np.zeros((SolidSolver.nPoint, 3)) + for iPoint in range(0, SolidSolver.nPoint): + coordX, coordY, coordZ = SolidSolver.GetInitialCoordinates(iPoint) + self.globalSolidCoordinates[iPoint, 0] = coordX + self.globalSolidCoordinates[iPoint, 1] = coordY + self.globalSolidCoordinates[iPoint, 2] = coordZ + else: + pass + + # --- Exchange information about processors --- # + if self.have_MPI: + if self.haveFluidSolver: + sendBufFluid = np.array(int(1)) + else: + sendBufFluid = np.array(int(0)) + if self.haveFluidInterface: + sendBufFluidInterface = np.array(int(1)) + else: + sendBufFluidInterface = np.array(int(0)) + rcvBufFluid = np.zeros(MPIsize, dtype=int) + rcvBufFluidInterface = np.zeros(MPIsize, dtype=int) + self.comm.Allgather(sendBufFluid, rcvBufFluid) + self.comm.Allgather(sendBufFluidInterface, rcvBufFluidInterface) + + for iProc in range(MPIsize): + if rcvBufFluid[iProc] == 1: + self.fluidSolverProcessors.append(iProc) + if rcvBufFluidInterface[iProc] == 1: + self.fluidInterfaceProcessors.append(iProc) + + del sendBufFluid, rcvBufFluid, sendBufFluidInterface, rcvBufFluidInterface + else: + self.fluidSolverProcessors.append(0) + self.fluidInterfaceProcessors.append(0) + + self.MPIBarrier() + + # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) + # --- Calculate the number of halo nodes on each partition + self.nLocalFluidInterfaceHaloNode = 0 + for iVertex in range(self.nLocalFluidInterfaceNodes): + if FluidSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True: + GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex) + self.FluidHaloNodeList[GlobalIndex] = iVertex + self.nLocalFluidInterfaceHaloNode += 1 + + # Calculate the number of physical (= not halo) nodes on each partition + self.nLocalFluidInterfacePhysicalNodes = self.nLocalFluidInterfaceNodes - self.nLocalFluidInterfaceHaloNode + if self.have_MPI == True: + self.FluidHaloNodeList = 
self.comm.allgather(self.FluidHaloNodeList) + else: + self.FluidHaloNodeList = [{}] + + # --- Calculate the total number of nodes (with and without halo) at the fluid interface (sum over all the partitions) and broadcast the number across all processors --- + sendBuffHalo = np.array(int(self.nLocalFluidInterfaceNodes)) + sendBuffPhysical = np.array(int(self.nLocalFluidInterfacePhysicalNodes)) + rcvBuffHalo = np.zeros(1, dtype=int) + rcvBuffPhysical = np.zeros(1, dtype=int) + if self.have_MPI: + self.comm.barrier() + self.comm.Allreduce(sendBuffHalo, rcvBuffHalo, op=self.MPI.SUM) + self.comm.Allreduce(sendBuffPhysical, rcvBuffPhysical, op=self.MPI.SUM) + self.nFluidInterfaceNodes = rcvBuffHalo[0] + self.nFluidInterfacePhysicalNodes = rcvBuffPhysical[0] + else: + self.nFluidInterfaceNodes = np.copy(sendBuffHalo) + self.nFluidInterfacePhysicalNodes = np.copy(sendBuffPhysical) + del sendBuffHalo, rcvBuffHalo, sendBuffPhysical, rcvBuffPhysical + + # --- Store the number of physical interface nodes on each processor and allgather the information --- + self.fluidPhysicalInterfaceNodesDistribution = np.zeros(MPIsize, dtype=int) + if self.have_MPI: + sendBuffPhysical = np.array(int(self.nLocalFluidInterfacePhysicalNodes)) + self.comm.Allgather(sendBuffPhysical, self.fluidPhysicalInterfaceNodesDistribution) + del sendBuffPhysical + else: + self.fluidPhysicalInterfaceNodesDistribution[0] = self.nFluidInterfacePhysicalNodes + + # --- Calculate and store the global indexing of interface physical nodes on each processor and allgather the information --- + if self.have_MPI: + if myid in self.fluidInterfaceProcessors: + globalIndexStart = 0 + for iProc in range(myid): + globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc] + globalIndexStop = globalIndexStart + self.nLocalFluidInterfacePhysicalNodes - 1 + else: + globalIndexStart = 0 + globalIndexStop = 0 + self.fluidGlobalIndexRange[myid] = [globalIndexStart, globalIndexStop] + self.fluidGlobalIndexRange = self.comm.allgather(self.fluidGlobalIndexRange) + else: + temp = {} + temp[0] = [0, self.nLocalFluidInterfacePhysicalNodes - 1] + self.fluidGlobalIndexRange = list() + self.fluidGlobalIndexRange.append(temp) + + self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes)) + self.MPIPrint('Total number of physical fluid interface nodes : {}'.format(self.nFluidInterfacePhysicalNodes)) + self.MPIPrint('Total number of solid (beam) interface nodes : {}'.format(self.nSolidInterfaceNodes)) + + self.MPIBarrier() + + # --- Get, for the fluid interface on each partition: + # --- The vertex indices, which are stored locally on each processor + # --- The coordinates X, Y and Z, which are stored in a global list in processor 0 + GlobalIndex = int() + localIndex = 0 + fluidIndexing_temp = {} + self.localFluidInterface_vertex_indices = np.zeros(self.nLocalFluidInterfacePhysicalNodes, dtype=int) + localFluidInterface_array_X_init = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidInterface_array_Y_init = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidInterface_array_Z_init = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + + for iVertex in range(self.nLocalFluidInterfaceNodes): + GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier,
iVertex) + posx, posy, posz = FluidSolver.GetVertex_UndeformedCoord(self.fluidInterfaceIdentifier, iVertex) + + if GlobalIndex in self.FluidHaloNodeList[myid].keys(): + self.haloNodesPositionsInit[GlobalIndex] = (posx, posy, posz) + else: + fluidIndexing_temp[GlobalIndex] = self.__getGlobalIndex('fluid', myid, localIndex) + localFluidInterface_array_X_init[localIndex] = posx + localFluidInterface_array_Y_init[localIndex] = posy + localFluidInterface_array_Z_init[localIndex] = posz + self.localFluidInterface_vertex_indices[localIndex] = int(iVertex) + localIndex += 1 + + #print("rank: {}, local_vertex_indices: {}".format(myid, self.localFluidInterface_vertex_indices)) + + if self.have_MPI: + fluidIndexing_temp = self.comm.allgather(fluidIndexing_temp) + for ii in range(len(fluidIndexing_temp)): + for key, value in fluidIndexing_temp[ii].items(): + self.fluidIndexing[key] = value + + # --- Collect local array sizes using gather in root process + bufXCoor = np.array(localFluidInterface_array_X_init) + bufYCoor = np.array(localFluidInterface_array_Y_init) + bufZCoor = np.array(localFluidInterface_array_Z_init) + self.sendCounts = np.array(self.comm.gather(self.nLocalFluidInterfacePhysicalNodes, 0)) + + if myid == self.rootProcess: + print("sendCounts: {}, total: {}".format(self.sendCounts, sum(self.sendCounts))) + self.globalFluidInterfaceXcoor = np.empty(sum(self.sendCounts)) + self.globalFluidInterfaceYcoor = np.empty(sum(self.sendCounts)) + self.globalFluidInterfaceZcoor = np.empty(sum(self.sendCounts)) + + self.comm.Gatherv(sendbuf=bufXCoor, recvbuf=(self.globalFluidInterfaceXcoor, self.sendCounts), root=0) + self.comm.Gatherv(sendbuf=bufYCoor, recvbuf=(self.globalFluidInterfaceYcoor, self.sendCounts), root=0) + self.comm.Gatherv(sendbuf=bufZCoor, recvbuf=(self.globalFluidInterfaceZcoor, self.sendCounts), root=0) + #if myid == 0: + #print("Gathered array X: {}".format(self.globalFluidInterfaceXcoor)) + #print("Gathered array Y: {}".format(self.globalFluidInterfaceYcoor)) + #print("Gathered array Z: {}".format(self.globalFluidInterfaceZcoor)) + + else: + self.fluidIndexing = fluidIndexing_temp.copy() + self.globalFluidInterfaceXcoor = localFluidInterface_array_X_init.copy() + self.globalFluidInterfaceYcoor = localFluidInterface_array_Y_init.copy() + self.globalFluidInterfaceZcoor = localFluidInterface_array_Z_init.copy() + + # Store the global fluid coordinates + if myid == self.rootProcess: + self.globalFluidCoordinates = np.zeros((self.nFluidInterfacePhysicalNodes, 3)) + for i in range(0, self.nFluidInterfacePhysicalNodes): + self.globalFluidCoordinates[i][0] = self.globalFluidInterfaceXcoor[i] + self.globalFluidCoordinates[i][1] = self.globalFluidInterfaceYcoor[i] + self.globalFluidCoordinates[i][2] = self.globalFluidInterfaceZcoor[i] + + print(self.globalFluidCoordinates.shape) + print(self.globalSolidCoordinates.shape) + + del fluidIndexing_temp, localFluidInterface_array_X_init, \ + localFluidInterface_array_Y_init, localFluidInterface_array_Z_init + + ################################################################################################### + # Initialize the local load array + # The initial displacements are set to 0 (this might be changed when we have restart capabilities) + ################################################################################################### + + self.globalFluidLoadX = np.zeros(self.nFluidInterfacePhysicalNodes) + self.globalFluidLoadY = np.zeros(self.nFluidInterfacePhysicalNodes) + self.globalFluidLoadZ = 
np.zeros(self.nFluidInterfacePhysicalNodes) + + if self.haveSolidSolver: + + self.globalSolidLoadX = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidLoadY = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidLoadZ = np.zeros(self.nSolidInterfaceNodes) + + self.globalSolidDispX = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidDispY = np.zeros(self.nSolidInterfaceNodes) + self.globalSolidDispZ = np.zeros(self.nSolidInterfaceNodes) + + def transferFluidTractions(self, FluidSolver, SolidSolver, MLSSolver): + """ + Transfer fluid tractions. + Gathers the fluid tractions from the interface into the root process. + Interpolates the tractions using the transposed matrix. + Applies the tractions into the solid solver. + """ + + # Recover the process and the size of the parallelization + myid, MPIsize = self.checkMPI() + + ################################################################################################################ + # --- STEP 1: Retrieve the fluid loads + # --- Get, for the fluid interface on each partition: + # --- The vertex indices, which are stored locally on each processor + # --- The coordinates X, Y and Z, which are stored in a global list in processor 0 + ################################################################################################################ + + # Initialize the local load array + localFluidLoadX = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidLoadY = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidLoadZ = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + + localIndex = 0 + # For the vertices that belong to the interface + for iVertex in self.localFluidInterface_vertex_indices: + # Compute the vertex forces on the fluid solver + loadX, loadY, loadZ = FluidSolver.GetFlowLoad(self.fluidInterfaceIdentifier, int(iVertex)) + # Store them in the local load array + localFluidLoadX[localIndex] = loadX + localFluidLoadY[localIndex] = loadY + localFluidLoadZ[localIndex] = loadZ + localIndex += 1 + + if self.have_MPI: + + # Store the local loads in buffers in the form of numpy arrays + bufXLoad = np.array(localFluidLoadX) + bufYLoad = np.array(localFluidLoadY) + bufZLoad = np.array(localFluidLoadZ) + + # Initialize the global load array + if myid == self.rootProcess: + print("sendCounts: {}, total: {}".format(self.sendCounts, sum(self.sendCounts))) + self.globalFluidLoadX = np.empty(sum(self.sendCounts)) + self.globalFluidLoadY = np.empty(sum(self.sendCounts)) + self.globalFluidLoadZ = np.empty(sum(self.sendCounts)) + + # Gatherv using self.sendCounts maintains the ordering of the coordinates + self.comm.Gatherv(sendbuf=bufXLoad, recvbuf=(self.globalFluidLoadX, self.sendCounts), root=0) + self.comm.Gatherv(sendbuf=bufYLoad, recvbuf=(self.globalFluidLoadY, self.sendCounts), root=0) + self.comm.Gatherv(sendbuf=bufZLoad, recvbuf=(self.globalFluidLoadZ, self.sendCounts), root=0) + + else: + self.globalFluidLoadX = localFluidLoadX.copy() + self.globalFluidLoadY = localFluidLoadY.copy() + self.globalFluidLoadZ = localFluidLoadZ.copy() + + # Delete local variables + del localFluidLoadX, localFluidLoadY, localFluidLoadZ + + ################################################################################################################ + # --- STEP 2: Interpolate + ################################################################################################################ + + # ---> Input: self.globalFluidLoadX, self.globalFluidLoadY, self.globalFluidLoadZ + + if myid == self.rootProcess: + self.globalSolidLoadX = 
MLSSolver.interpolation_matrix.transpose().dot(self.globalFluidLoadX) + self.globalSolidLoadY = MLSSolver.interpolation_matrix.transpose().dot(self.globalFluidLoadY) + self.globalSolidLoadZ = MLSSolver.interpolation_matrix.transpose().dot(self.globalFluidLoadZ) + + # ---> Output: self.globalSolidLoadX, self.globalSolidLoadY, self.globalSolidLoadZ + + ################################################################################################################ + # --- STEP 3: Check conservation + ################################################################################################################ + + # --- Check for total force conservation after interpolation + if myid == self.rootProcess: + + # Total loads before interpolation, fluid side + FFX = self.globalFluidLoadX.sum() + FFY = self.globalFluidLoadY.sum() + FFZ = self.globalFluidLoadZ.sum() + + # Total loads after interpolation, solid side + FX = self.globalSolidLoadX.sum() + FY = self.globalSolidLoadY.sum() + FZ = self.globalSolidLoadZ.sum() + + self.MPIPrint("Checking f/s interface total force...") + self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ)) + self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ)) + + hist_file = open("historyFSI.dat", "a") + hist_file.write(str(FFX) + "\t" + str(FFY) + "\t" + str(FFZ) + "\t" + str(FX) + "\t" + str(FY) + "\t" + str(FZ) + "\t") + hist_file.close() + + ################################################################################################################ + # --- STEP 4: Transfer to the structural solver + # --- pyBeam runs in single core, so there is no need to deal with the parallelization + ################################################################################################################ + + if self.haveSolidSolver: + # For the vertices that belong to the interface + for iVertex in range(0, self.nSolidInterfaceNodes): + # Store them in the solid solver directly + SolidSolver.SetLoads(iVertex, self.globalSolidLoadX[iVertex], + self.globalSolidLoadY[iVertex], + self.globalSolidLoadZ[iVertex]) + + def transferStructuralDisplacements(self, FSIConfig, FluidSolver, SolidSolver, MLSSolver): + """ + Transfer structural displacements. + Gathers the structural displacements from the interface. + Interpolates the displacements using the transposed matrix. + Applies the fluid displacements by scattering them into the correct position for the fluid solver. 
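+ + The solid displacements are statically under-relaxed before interpolation (see STEP 1 below): d_relaxed = RELAX_PARAM * d_new + (1 - RELAX_PARAM) * d_old. As an illustrative example (values assumed, not taken from a real run): with RELAX_PARAM = 0.6, a node whose displacement changes from 1.0e-3 to 2.0e-3 between two BGS iterations is prescribed 0.6*2.0e-3 + 0.4*1.0e-3 = 1.6e-3.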
+ """ + + # Recover the process and the size of the parallelization + myid, MPIsize = self.checkMPI() + + ################################################################################################################ + # --- STEP 1: Retrieve the structural displacements from pyBeam and apply the relaxation + # --- pyBeam runs in single core, so there is no need to deal with the parallelization + ################################################################################################################ + + if self.haveSolidSolver: + + # Recover the relaxation parameter + relaxParam = float(FSIConfig["RELAX_PARAM"]) + + # Store the old displacements + self.globalSolidDispXOld = self.globalSolidDispX.copy() + self.globalSolidDispYOld = self.globalSolidDispY.copy() + self.globalSolidDispZOld = self.globalSolidDispZ.copy() + + # Initialize local vectors, to store the relaxed displacements + relaxedSolidDispX = np.zeros(self.nSolidInterfaceNodes) + relaxedSolidDispY = np.zeros(self.nSolidInterfaceNodes) + relaxedSolidDispZ = np.zeros(self.nSolidInterfaceNodes) + + # For the vertices that belong to the interface + for iVertex in range(0, self.nSolidInterfaceNodes): + + # Store the new displacements in the global load array directly + dispX, dispY, dispZ = SolidSolver.ExtractDisplacements(iVertex) + self.globalSolidDispX[iVertex] = dispX + self.globalSolidDispY[iVertex] = dispY + self.globalSolidDispZ[iVertex] = dispZ + + # Compute the relaxed, interface displacements + relaxedSolidDispX[iVertex] = relaxParam * dispX + (1.0 - relaxParam) * self.globalSolidDispXOld[iVertex] + relaxedSolidDispY[iVertex] = relaxParam * dispY + (1.0 - relaxParam) * self.globalSolidDispYOld[iVertex] + relaxedSolidDispZ[iVertex] = relaxParam * dispZ + (1.0 - relaxParam) * self.globalSolidDispZOld[iVertex] + + ################################################################################################################ + # --- STEP 2: Interpolate + ################################################################################################################ + + # ---> Input: relaxedSolidDispX, relaxedSolidDispY, relaxedSolidDispZ + + if myid == self.rootProcess: + + self.globalFluidDispX = MLSSolver.interpolation_matrix.dot(relaxedSolidDispX) + self.globalFluidDispY = MLSSolver.interpolation_matrix.dot(relaxedSolidDispY) + self.globalFluidDispZ = MLSSolver.interpolation_matrix.dot(relaxedSolidDispZ) + + # ---> Output: self.globalFluidDispX, self.globalFluidDispY, self.globalFluidDispZ + + ################################################################################################################ + # --- STEP 3: Check conservation + ################################################################################################################ + + # --- Checking conservation --- + + if myid == self.rootProcess: + + WSX = self.globalSolidLoadX.dot(relaxedSolidDispX) + WSY = self.globalSolidLoadY.dot(relaxedSolidDispY) + WSZ = self.globalSolidLoadZ.dot(relaxedSolidDispZ) + + WFX = self.globalFluidLoadX.dot(self.globalFluidDispX) + WFY = self.globalFluidLoadY.dot(self.globalFluidDispY) + WFZ = self.globalFluidLoadZ.dot(self.globalFluidDispZ) + + self.MPIPrint("Checking f/s interface conservation...") + self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ)) + self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ)) + + ################################################################################################################ + # --- STEP 4: Transfer to the fluid 
solver + ################################################################################################################ + + # --- Recover them from the interpolated vectors + localFluidDispX = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidDispY = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + localFluidDispZ = np.zeros(self.nLocalFluidInterfacePhysicalNodes) + + if self.have_MPI: + + self.comm.Scatterv(sendbuf=(self.globalFluidDispX, self.sendCounts), recvbuf=localFluidDispX, root=0) + self.comm.Scatterv(sendbuf=(self.globalFluidDispY, self.sendCounts), recvbuf=localFluidDispY, root=0) + self.comm.Scatterv(sendbuf=(self.globalFluidDispZ, self.sendCounts), recvbuf=localFluidDispZ, root=0) + + # print("rank: {}, local_array X: {}".format(myid, localFluidDispX)) + # print("rank: {}, local_array Y: {}".format(myid, localFluidDispY)) + # print("rank: {}, local_array Z: {}".format(myid, localFluidDispZ)) + + else: + localFluidDispX = self.globalFluidDispX.copy() + localFluidDispY = self.globalFluidDispY.copy() + localFluidDispZ = self.globalFluidDispZ.copy() + + # For the vertices that belong to the interface + localIndex = 0 + for iVertex in self.localFluidInterface_vertex_indices: + # Store them in the mesh displacement routine + FluidSolver.SetMeshDisplacement(self.fluidInterfaceIdentifier, int(iVertex), localFluidDispX[localIndex], + localFluidDispY[localIndex], localFluidDispZ[localIndex]) + # Increment the local index + localIndex += 1 + + # Delete local variables + del localFluidDispX, localFluidDispY, localFluidDispZ + if myid == self.rootProcess: + del relaxedSolidDispX, relaxedSolidDispY, relaxedSolidDispZ + + def SteadyFSI(self, FSIconfig, FluidSolver, SolidSolver, MLSSolver): + """ + Runs the steady FSI computation + Synchronizes the fluid and solid solver with data exchange at the f/s interface. 
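+ + Each BGS iteration performs, in this order: + 1. transferStructuralDisplacements() (skipped at FSIIter = 0, while the mesh is still undeformed) + 2. fluid solve: ResetConvergence / Preprocess / Run / Postprocess / Update / Monitor / Output + 3. transferFluidTractions() + 4. solid solve: SolidSolver.run() (on the process that owns the solid solver)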
+ """ + + # Recover the process and the size of the parallelization + myid, MPIsize = self.checkMPI() + + # --- Set some general variables for the steady computation --- # + nFSIIter = FSIconfig['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step) + + if myid is 0: + hist_file = open("historyFSI.dat", "w") + hist_file.write("FF(X) \t FF(Y) \t FF(Z) \t FS(X) \t FS(Y) \t FS(Z) \t CD \t CL \n") + hist_file.close() + + + self.MPIPrint('\n********************************') + self.MPIPrint('* Begin steady FSI computation *') + self.MPIPrint('********************************\n') + + self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************') + + # --- External FSI loop --- # + self.FSIIter = 0 + + # For the number of iterations allowed + while self.FSIIter < nFSIIter: + + self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter)) + + if self.FSIIter > 0: + # --- Surface displacements interpolation and communication ---# + self.MPIPrint('\n##### Transferring displacements\n') + self.MPIBarrier() + self.transferStructuralDisplacements(FSIconfig, FluidSolver, SolidSolver, MLSSolver) + + # --- Fluid solver call for FSI subiteration --- # + self.MPIPrint('\n##### Launching fluid solver for a steady computation\n') + self.MPIBarrier() + FluidSolver.ResetConvergence() # Make sure the solver starts convergence from 0 + FluidSolver.Preprocess(0) # Time iteration pre-processing + FluidSolver.Run() # Run one time-step (static: one simulation) + FluidSolver.Postprocess() # Run one time-step (static: one simulation) + FluidSolver.Update() # Update the solver for the next time iteration + FluidSolver.Monitor(0) # Monitor the solver and output solution to file if required + FluidSolver.Output(0) # Output the solution to file + + # --- Surface fluid loads interpolation and communication ---# + self.MPIPrint('\n##### Transferring fluid tractions to the beam solver\n') + self.MPIBarrier() + self.transferFluidTractions(FluidSolver, SolidSolver, MLSSolver) + + # --- Solid solver call for FSI subiteration --- # + self.MPIPrint('\n##### Launching solid solver for a static computation\n') + self.MPIBarrier() + if self.haveSolidSolver: + SolidSolver.run() + + self.FSIIter += 1 + + # Store the surface flow history + if myid is 0: + new_name_surf = "./surface_flow_" + str("{:04d}".format(self.FSIIter)) + ".vtk" + shutil.move("surface_flow.vtk", new_name_surf) + + hist_file = open("historyFSI.dat", "a") + hist_file.write(str(FluidSolver.Get_DragCoeff()) + "\t") + hist_file.write(str(FluidSolver.Get_LiftCoeff()) + "\n") + hist_file.close() + + self.MPIBarrier() + + self.MPIPrint('\nBGS is converged (strong coupling)') + self.MPIPrint(' ') + self.MPIPrint('*************************') + self.MPIPrint('* End FSI computation *') + self.MPIPrint('*************************') + self.MPIPrint(' ') + + def __getGlobalIndex(self, physics, iProc, iLocalVertex): + """ + Calculate the global indexing of interface nodes accross all the partitions. This does not include halo nodes. 
+ """ + + if physics == 'fluid': + globalStartIndex = self.fluidGlobalIndexRange[iProc][iProc][0] + elif physics == 'solid': + globalStartIndex = self.solidGlobalIndexRange[iProc][iProc][0] + + globalIndex = globalStartIndex + iLocalVertex + + return globalIndex diff --git a/SU2_PY/SU2_FSI/switch.py b/SU2_PY/SU2_FSI/switch.py new file mode 100644 index 000000000000..08fef0f24463 --- /dev/null +++ b/SU2_PY/SU2_FSI/switch.py @@ -0,0 +1,52 @@ +# ------------------------------------------------------------------- +# Switch Class +# ------------------------------------------------------------------- +# source: Brian Beck, PSF License, ActiveState Code +# http://code.activestate.com/recipes/410692/ + +class switch(object): + """ Readable switch construction + + Example: + + c = 'z' + for case in switch(c): + if case('a'): pass # only necessary if the rest of the suite is empty + if case('b'): pass + # ... + if case('y'): pass + if case('z'): + print("c is lowercase!") + break + if case('A'): pass + # ... + if case('Z'): + print("c is uppercase!") + break + if case(): # default + print("I dunno what c was!") + + source: Brian Beck, PSF License, ActiveState Code + http://code.activestate.com/recipes/410692/ + """ + + def __init__(self, value): + self.value = value + self.fall = False + + def __iter__(self): + """Return the match method once, then stop""" + yield self.match + raise StopIteration + + def match(self, *args): + """Indicate whether or not to enter a case suite""" + if self.fall or not args: + return True + elif self.value in args: + self.fall = True + return True + else: + return False + +#: class switch() diff --git a/SU2_PY/meson.build b/SU2_PY/meson.build index fa11084efc76..122fbbaef0e4 100644 --- a/SU2_PY/meson.build +++ b/SU2_PY/meson.build @@ -10,9 +10,12 @@ install_data(['continuous_adjoint.py', 'set_ffd_design_var.py', 'compute_polar.py', 'compute_multipoint.py', + 'convert_to_csv.py', 'discrete_adjoint.py', 'direct_differentiation.py', 'fsi_computation.py', + 'pyBeamFSI.py', + 'pyBeamFSI_AD.py', 'SU2_CFD.py'], install_dir: join_paths(get_option('bindir'))) @@ -70,6 +73,12 @@ install_data(['SU2/util/bunch.py', 'SU2/util/__init__.py'], install_dir: join_paths(get_option('bindir'), 'SU2/util')) +install_data(['SU2_FSI/PrimalInterface.py', + 'SU2_FSI/AdjointInterface.py', + 'SU2_FSI/FSI_config.py', + 'SU2_FSI/switch.py'], + install_dir: join_paths(get_option('bindir'), 'SU2_FSI')) + install_data(['FSI_tools/__init__.py', 'FSI_tools/FSIInterface.py', 'FSI_tools/FSI_config.py', @@ -77,5 +86,5 @@ install_data(['FSI_tools/__init__.py', install_dir: join_paths(get_option('bindir'), 'FSI_tools')) install_data(['SU2_Nastran/__init__.py', - 'SU2_Nastran/pysu2_nastran.py',], + 'SU2_Nastran/pysu2_nastran.py'], install_dir: join_paths(get_option('bindir'), 'SU2_Nastran')) diff --git a/SU2_PY/pyBeamFSI.py b/SU2_PY/pyBeamFSI.py new file mode 100755 index 000000000000..25541c6f6b68 --- /dev/null +++ b/SU2_PY/pyBeamFSI.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python + +## \file fsi_computation.py +# \brief Python wrapper code for FSI computation by coupling pyBeam and SU2. +# \author David Thomas, Rocco Bombardieri, Ruben Sanchez +# \version 7.0.8 "Blackbird" +# +# SU2 Project Website: https://su2code.github.io +# +# The SU2 Project is maintained by the SU2 Foundation +# (http://su2foundation.org) +# +# Copyright 2012-2020, SU2 Contributors (cf. 
AUTHORS.md) +# +# SU2 is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# SU2 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with SU2. If not, see <http://www.gnu.org/licenses/>. + +# ---------------------------------------------------------------------- +# Imports +# ---------------------------------------------------------------------- + +import os, sys, shutil, copy +import time as timer + +from optparse import OptionParser # use a parser for configuration + +from SU2_FSI import FSI_config as io # imports FSI config tools +from SU2_FSI import PrimalInterface as FSI # imports FSI python tools +import pyBeamInterface as pyBeamInterface +import pyMLSInterface as Spline_Module + +# imports the CFD (SU2) module for FSI computation +import pysu2 +import pyBeam + + +# ------------------------------------------------------------------- +# Main +# ------------------------------------------------------------------- + +def main(): + # --- Get the FSI config file name from the command line options --- # + parser = OptionParser() + parser.add_option("-f", "--file", dest="filename", + help="read config from FILE", metavar="FILE") + parser.add_option("--serial", action="store_true", + help="Specify if we need to initialize MPI", dest="serial", default=False) + + (options, args) = parser.parse_args() + + if options.serial: + comm = 0 + myid = 0 + numberPart = 1 + have_MPI = False + else: + from mpi4py import MPI + comm = MPI.COMM_WORLD + myid = comm.Get_rank() + numberPart = comm.Get_size() + have_MPI = True + + rootProcess = 0 + + # --- Set the working directory --- # + if myid == rootProcess: + if os.getcwd() not in sys.path: + sys.path.append(os.getcwd()) + print("Setting working directory : {}".format(os.getcwd())) + else: + print("Working directory is set to {}".format(os.getcwd())) + + # starts timer + start = timer.time() + + confFile = str(options.filename) + + FSI_config = io.FSIConfig(confFile) # FSI configuration file + CFD_ConFile = FSI_config['SU2_CONFIG'] # CFD configuration file + CSD_ConFile = FSI_config['PYBEAM_CONFIG'] # CSD configuration file + MLS_confFile = FSI_config['MLS_CONFIG_FILE_NAME'] # MLS configuration file + + if have_MPI: + comm.barrier() + + # --- Initialize the fluid solver: SU2 --- # + if myid == rootProcess: + print('\n***************************** Initializing SU2 **************************************') + try: + FluidSolver = pysu2.CSinglezoneDriver(CFD_ConFile, 1, comm) + except TypeError as exception: + print('A TypeError occurred in pysu2.CSinglezoneDriver : ', exception) + if options.serial: + print('ERROR : You are trying to launch a computation without initializing MPI but the wrapper has been built in parallel. Please remove the --serial option that is incompatible with a parallel build.') + else: + print('ERROR : You are trying to initialize MPI with a serial build of the wrapper.
Please, add --serial to launch your simulation.') + return + + if have_MPI: + comm.barrier() + + # --- Initialize the solid solver: pyBeam --- # + if myid == rootProcess: + print('\n***************************** Initializing pyBeam ************************************') + try: + SolidSolver = pyBeamInterface.pyBeamSolver(CSD_ConFile) + except TypeError as exception: + print('ERROR building the Solid Solver: ', exception) + else: + SolidSolver = None + + if have_MPI: + comm.barrier() + + # --- Initialize and set the coupling environment --- # + if myid == rootProcess: + print('\n***************************** Initializing FSI interface *****************************') + try: + FSIInterface = FSI.Interface(FSI_config, FluidSolver, SolidSolver, None, have_MPI) + except TypeError as exception: + print('ERROR building the FSI Interface: ', exception) + + if have_MPI: + comm.barrier() + + if myid == rootProcess: + print('\n***************************** Connect fluid and solid solvers *****************************') + try: + FSIInterface.connect(FSI_config, FluidSolver, SolidSolver) + except TypeError as exception: + print('ERROR connecting the FSI Interface: ', exception) + + if have_MPI: + comm.barrier() + + if myid == rootProcess: # we perform this calculation on the root core + print('\n***************************** Initializing MLS Interpolation *************************') + try: + MLS = Spline_Module.pyMLSInterface(MLS_confFile, FSIInterface.globalFluidCoordinates, + FSIInterface.globalSolidCoordinates) + except TypeError as exception: + print('ERROR building the MLS Interpolation: ', exception) + + else: + MLS = None + + if have_MPI: + comm.barrier() + + # Run the solver + if myid == 0: + print("\n------------------------------ Begin Solver -----------------------------\n") + sys.stdout.flush() + if have_MPI: + comm.Barrier() + + FSIInterface.SteadyFSI(FSI_config, FluidSolver, SolidSolver, MLS) + + # Get drag coefficient + drag = FluidSolver.Get_DragCoeff() + print('DRAG COEFFICIENT: ', drag) + + # Postprocess the solver and exit cleanly + FluidSolver.Postprocessing() + + if FluidSolver is not None: + del FluidSolver + + + return + + +# ------------------------------------------------------------------- +# Run Main Program +# ------------------------------------------------------------------- + +# --- This is only accessed if running from command prompt --- # +if __name__ == '__main__': + main() diff --git a/SU2_PY/pyBeamFSI_AD.py b/SU2_PY/pyBeamFSI_AD.py new file mode 100755 index 000000000000..8cb3d9af155a --- /dev/null +++ b/SU2_PY/pyBeamFSI_AD.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python + +## \file pyBeamFSI_AD.py +# \brief Python wrapper code for adjoint computation by coupling pyBeam and SU2. +# \author Ruben Sanchez based on work by David Thomas and Rocco Bombardieri +# \version 7.0.8 "Blackbird" +# +# SU2 Project Website: https://su2code.github.io +# +# The SU2 Project is maintained by the SU2 Foundation +# (http://su2foundation.org) +# +# Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md) +# +# SU2 is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# SU2 is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with SU2. If not, see <http://www.gnu.org/licenses/>. + +# ---------------------------------------------------------------------- +# Imports +# ---------------------------------------------------------------------- + +import os, sys, shutil, copy +import time as timer + +from optparse import OptionParser # use a parser for configuration + +from SU2_FSI import FSI_config as io # imports FSI config tools +from SU2_FSI import AdjointInterface as FSI # imports FSI python tools +import pyBeamInterface as pyBeamInterface +import pyMLSInterface as Spline_Module + +# imports the CFD (SU2) module for FSI computation +import pysu2ad as pysu2 +import pyBeam + + +# ------------------------------------------------------------------- +# Main +# ------------------------------------------------------------------- + +def main(): + # --- Get the FSI config file name from the command line options --- # + parser = OptionParser() + parser.add_option("-f", "--file", dest="filename", + help="read config from FILE", metavar="FILE") + parser.add_option("--serial", action="store_true", + help="Specify if we need to initialize MPI", dest="serial", default=False) + + (options, args) = parser.parse_args() + + if options.serial: + comm = 0 + myid = 0 + numberPart = 1 + have_MPI = False + else: + from mpi4py import MPI + comm = MPI.COMM_WORLD + myid = comm.Get_rank() + numberPart = comm.Get_size() + have_MPI = True + + rootProcess = 0 + + # --- Set the working directory --- # + if myid == rootProcess: + if os.getcwd() not in sys.path: + sys.path.append(os.getcwd()) + print("Setting working directory : {}".format(os.getcwd())) + else: + print("Working directory is set to {}".format(os.getcwd())) + + # starts timer + start = timer.time() + + confFile = str(options.filename) + + FSI_config = io.FSIConfig(confFile) # FSI configuration file + CFD_ConFile = FSI_config['SU2_CONFIG'] # CFD configuration file + CSD_ConFile = FSI_config['PYBEAM_CONFIG'] # CSD configuration file + MLS_confFile = FSI_config['MLS_CONFIG_FILE_NAME'] # MLS configuration file + + if have_MPI: + comm.barrier() + + # --- Initialize the fluid solver: SU2 --- # + if myid == rootProcess: + print('\n***************************** Initializing SU2 **************************************') + try: + FluidSolver = pysu2.CDiscAdjSinglezoneDriver(CFD_ConFile, 1, comm) + except TypeError as exception: + print('A TypeError occurred in pysu2.CDiscAdjSinglezoneDriver : ', exception) + if options.serial: + print('ERROR : You are trying to launch a computation without initializing MPI but the wrapper has been built in parallel. Please remove the --serial option that is incompatible with a parallel build.') + else: + print('ERROR : You are trying to initialize MPI with a serial build of the wrapper.
Please, add --serial to launch your simulation.') + return + + if have_MPI: + comm.barrier() + + # --- Initialize the solid solver: pyBeam --- # + if myid == rootProcess: + print('\n***************************** Initializing pyBeam ************************************') + try: + SolidSolver = pyBeamInterface.pyBeamADSolver(CSD_ConFile) + except TypeError as exception: + print('ERROR building the Solid Solver: ', exception) + else: + SolidSolver = None + + if have_MPI: + comm.barrier() + + # --- Initialize and set the coupling environment --- # + if myid == rootProcess: + print('\n***************************** Initializing FSI interface *****************************') + try: + FSIInterface = FSI.AdjointInterface(FSI_config, FluidSolver, SolidSolver, None, have_MPI) + except TypeError as exception: + print('ERROR building the FSI Interface: ', exception) + + if have_MPI: + comm.barrier() + + + if myid == rootProcess: + print('\n***************************** Connect fluid and solid solvers *****************************') + try: + FSIInterface.connect(FSI_config, FluidSolver, SolidSolver) + except TypeError as exception: + print('ERROR building the Interpolation Interface: ', exception) + + if have_MPI: + comm.barrier() + + if myid == rootProcess: # we perform this calculation on the root core + print('\n***************************** Initializing MLS Interpolation *************************') + try: + MLS = Spline_Module.pyMLSInterface(MLS_confFile, FSIInterface.globalFluidCoordinates, + FSIInterface.globalSolidCoordinates) + except TypeError as exception: + print('ERROR building the MLS Interpolation: ', exception) + + else: + MLS = None + + if have_MPI: + comm.barrier() + + # Run the solver + if myid == 0: + print("\n------------------------------ Begin Solver -----------------------------\n") + sys.stdout.flush() + if have_MPI: + comm.Barrier() + + FSIInterface.SteadyFSI(FSI_config, FluidSolver, SolidSolver, MLS) + + # Postprocess the solver and exit cleanly + FluidSolver.Postprocessing() + + if FluidSolver is not None: + del FluidSolver + + + return + + +# ------------------------------------------------------------------- +# Run Main Program +# ------------------------------------------------------------------- + +# --- This is only accessed if running from command prompt --- # +if __name__ == '__main__': + main() diff --git a/TestCases/pybeam_fsi/adjoint/config.cfg b/TestCases/pybeam_fsi/adjoint/config.cfg new file mode 100644 index 000000000000..0a7cee383c72 --- /dev/null +++ b/TestCases/pybeam_fsi/adjoint/config.cfg @@ -0,0 +1,20 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% Case description: FSI adjoint problem SU2-pyBeam % +% Author: Ruben Sanchez, Rocco Bombardieri % +% Institution: TU Kaiserslautern, Universidad Carlos III Madrid % +% Date: 30/08/2019 % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% ------------- CFD & CSD CONFIGURATION FILES ------------% + +SU2_CONFIG = configFlow.cfg +PYBEAM_CONFIG = configBeam.cfg +MLS_CONFIG_FILE_NAME = configMLS.cfg + +NDIM = 2 + +NB_FSI_ITER = 3 +FSI_TOLERANCE = 1e-7 +RELAX_PARAM = 0.6 diff --git a/TestCases/pybeam_fsi/adjoint/configBeam.cfg b/TestCases/pybeam_fsi/adjoint/configBeam.cfg new file mode 100644 index 000000000000..46773f6d7387 --- /dev/null +++ b/TestCases/pybeam_fsi/adjoint/configBeam.cfg @@ -0,0 +1,41 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% Case description: FSI adjoint problem 
SU2-pyBeam % +% Author: Rocco Bombardieri, Ruben Sanchez % +% Institution: Universidad Carlos III Madrid, TU Kaiserslautern % +% Date: 30/08/2019 % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% Young modulus [GPa] +Y_MODULUS = 40000 + +% Poisson Ratio +POISSON = 0.3 + +% Beam Density [kg/m^3] +RHO = 2700 + +% Number of loadsteps used in the analysis (DEFAULT = 1) +LOAD_STEPS = 1 + +% Number of iterations used in the analysis (DEFAULT = 1) +N_STRUCT_ITER =1 + +% Convergence rate criterium (DEFAULT = 1e-4) +CONV_CRITERIUM = 1e-7 +% +% Tolerance of the linear solver +TOLERANCE_LINSOL = 1e-2 +% +% Kind of linear solver (FullPivHouseholderQr, PartialPivLu, FullPivLu, HouseholderQr, ColPivHouseholderQr, LLT, LDLT) +KIND_LINSOL = FullPivLu + +% MESH file +MESH_FILE = mesh.pyBeam + +% PROPERTY file +PROPERTY_FILE = property.pyBeam + +WRITE_RESTART=1 +RESTART=0 diff --git a/TestCases/pybeam_fsi/adjoint/configFlow.cfg b/TestCases/pybeam_fsi/adjoint/configFlow.cfg new file mode 100644 index 000000000000..263078d268e3 --- /dev/null +++ b/TestCases/pybeam_fsi/adjoint/configFlow.cfg @@ -0,0 +1,157 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% Case description: FSI adjoint problem SU2-pyBeam % +% Author: Ruben Sanchez, Rocco Bombardieri % +% Institution: TU Kaiserslautern, Universidad Carlos III Madrid % +% Date: 30/08/2019 % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% ------------- DIRECT, ADJOINT, AND LINEARIZED PROBLEM DEFINITION ------------% +% +% Physical governing equations (EULER, NAVIER_STOKES, +% TNE2_EULER, TNE2_NAVIER_STOKES, +% WAVE_EQUATION, HEAT_EQUATION, LINEAR_ELASTICITY, +% POISSON_EQUATION) +SOLVER= INC_NAVIER_STOKES +KIND_TURB_MODEL= NONE +MATH_PROBLEM= DISCRETE_ADJOINT + +RESTART_SOL= NO +UNST_RESTART_ITER = 1 + +SCREEN_OUTPUT= (INNER_ITER, RMS_ADJ_PRESSURE, RMS_ADJ_VELOCITY-X, RMS_VELOCITY-Y, SENS_GEO) + + +% ---------------- INCOMPRESSIBLE FLOW CONDITION DEFINITION -------------------% + +INNER_ITER = 200 + +INC_DENSITY_MODEL= CONSTANT + +INC_ENERGY_EQUATION = NO +INC_DENSITY_INIT= 1.0 +INC_VELOCITY_INIT= ( 1.0, 0.0, 0.0 ) + +INC_TEMPERATURE_INIT= 300 + +INC_NONDIM= INITIAL_VALUES + +INC_DENSITY_REF= 1.0 +INC_VELOCITY_REF= 1.0 +INC_TEMPERATURE_REF = 1.0 + +% --------------------------- VISCOSITY MODEL ---------------------------------% + +VISCOSITY_MODEL= CONSTANT_VISCOSITY +MU_CONSTANT= 0.001 + +% ---------------------- REFERENCE VALUE DEFINITION ---------------------------% + +REF_ORIGIN_MOMENT_X = 0.00 +REF_ORIGIN_MOMENT_Y = 0.00 +REF_ORIGIN_MOMENT_Z = 0.00 + +REF_LENGTH= 0.01 +REF_AREA= 0.01 + +% -------------------- BOUNDARY CONDITION DEFINITION --------------------------% + +MARKER_HEATFLUX= ( wallF, 0.0 ) + +INC_INLET_TYPE= VELOCITY_INLET +INC_OUTLET_TYPE= PRESSURE_OUTLET + +MARKER_INLET= ( inlet, 0.0, 1.0, 1.0, 0.0, 0.0 ) +MARKER_OUTLET= ( outlet, 0.0 ) +MARKER_EULER= ( upper, lower ) + +MARKER_PLOTTING= ( wallF ) +MARKER_MONITORING= ( wallF ) + +MARKER_DEFORM_MESH= ( wallF ) +MARKER_FLUID_LOAD= ( wallF ) + +% ------------- COMMON PARAMETERS DEFINING THE NUMERICAL METHOD ---------------% + +NUM_METHOD_GRAD= WEIGHTED_LEAST_SQUARES +CFL_NUMBER= 1000 + +CFL_ADAPT= NO +CFL_ADAPT_PARAM= ( 1.5, 0.5, 1.0, 100.0 ) + +RK_ALPHA_COEFF= ( 0.66667, 0.66667, 1.000000 ) + +% ------------------------ LINEAR SOLVER DEFINITION ---------------------------% + +LINEAR_SOLVER= FGMRES +LINEAR_SOLVER_PREC= ILU +LINEAR_SOLVER_ERROR= 1E-15 +LINEAR_SOLVER_ITER= 
20 + +% --------------------------- MESH PARAMETERS ---------------------------------% + +DEFORM_MESH = YES +DEFORM_STIFFNESS_TYPE = WALL_DISTANCE + +DEFORM_LINEAR_SOLVER = CONJUGATE_GRADIENT +DEFORM_LINEAR_SOLVER_PREC = ILU +DEFORM_LINEAR_SOLVER_ERROR = 1E-8 +DEFORM_LINEAR_SOLVER_ITER = 5000 +DEFORM_CONSOLE_OUTPUT = NO + +% -------------------------- MULTIGRID PARAMETERS -----------------------------% + +MGLEVEL= 0 +MGCYCLE= V_CYCLE + +MG_PRE_SMOOTH= ( 1, 2, 3, 3 ) +MG_POST_SMOOTH= ( 0, 0, 0, 0 ) +MG_CORRECTION_SMOOTH= ( 0, 0, 0, 0 ) +MG_DAMP_RESTRICTION= 0.7 +MG_DAMP_PROLONGATION= 0.7 + +% -------------------- FLOW NUMERICAL METHOD DEFINITION -----------------------% + +CONV_NUM_METHOD_FLOW= FDS +MUSCL_FLOW= YES +SLOPE_LIMITER_FLOW= NONE +TIME_DISCRE_FLOW= EULER_IMPLICIT + +% -------------------- TURBULENT NUMERICAL METHOD DEFINITION ------------------% + +CONV_NUM_METHOD_TURB= SCALAR_UPWIND +MUSCL_TURB= NO +SLOPE_LIMITER_TURB= VENKATAKRISHNAN +TIME_DISCRE_TURB= EULER_IMPLICIT + +% --------------------------- CONVERGENCE PARAMETERS --------------------------% + +CONV_FIELD= RMS_ADJ_PRESSURE +CONV_RESIDUAL_MINVAL= -12 +CONV_STARTITER= 10 +CONV_CAUCHY_ELEMS= 100 +CONV_CAUCHY_EPS= 1E-5 + +% ------------------------- INPUT/OUTPUT INFORMATION --------------------------% + +MESH_FILENAME= mesh.su2 + +MESH_FORMAT= SU2 +MESH_OUT_FILENAME= mesh_out.su2 + +SOLUTION_FILENAME= solution_flow.dat +RESTART_FILENAME= restart_flow.dat +VOLUME_ADJ_FILENAME= adjoint + +TABULAR_FORMAT= TECPLOT +CONV_FILENAME= history + +VOLUME_FILENAME= flow +SURFACE_FILENAME= surface_flow + +WRT_SOL_FREQ= 1000 +WRT_SOL_FREQ_DUALTIME= 1 + +WRT_CON_FREQ= 1 +WRT_CON_FREQ_DUALTIME= 1 diff --git a/TestCases/pybeam_fsi/adjoint/configMLS.cfg b/TestCases/pybeam_fsi/adjoint/configMLS.cfg new file mode 100644 index 000000000000..af52588bbddc --- /dev/null +++ b/TestCases/pybeam_fsi/adjoint/configMLS.cfg @@ -0,0 +1,35 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% FSI SU2-Moving Least Square configuration file % +% Case description: Aeroelastic (FSI) simulation by external solver coupling % +% Author: Rocco Bombardieri % +% Institution: Universidad Carlos III Madrid % +% Date: 30/08/2019 % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% ------------- MLS parameters ------------% + +% Polinomial order +POLY = 2 + +% Radial Basis weight order +WEIGHT = 4 + +% Max radius +RMAX = 0.015 + +% Number of points for the MLS +POINTS = 33 + +% What's Delta? 
+DELTA = 1.0 + +% pseudo inverce tollerance condition (MOONRE-PRENDROSE PSEUDOINV ) +TOLL_SVD = 10E-7 + +% ------------- Debugging options ------------% +DEBUG = NO + +% Mode scale factor +MAGNIF_FACTOR = 0.01 diff --git a/TestCases/pybeam_fsi/adjoint/mesh.pyBeam b/TestCases/pybeam_fsi/adjoint/mesh.pyBeam new file mode 100644 index 000000000000..8dfa06dc21dd --- /dev/null +++ b/TestCases/pybeam_fsi/adjoint/mesh.pyBeam @@ -0,0 +1,94 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% Case description: FSI primal adjoint SU2-pyBeam % +% Author: Rocco Bombardieri % +% Institution: Universidad Carlos III Madrid % +% Date: 30/08/2019 % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Respect the format, mesh and conectivity have to start in line following NPOIN and NELEM + +NDIM = 3 + +NPOIN= 33 + 0 0.000 0 + 0 0.001 0 + 0 0.002 0 + 0 0.003 0 + 0 0.004 0 + 0 0.005 0 + 0 0.006 0 + 0 0.007 0 + 0 0.008 0 + 0 0.009 0 + 0 0.010 0 + -0.00025 0.000 0 + -0.00025 0.001 0 + -0.00025 0.002 0 + -0.00025 0.003 0 + -0.00025 0.004 0 + -0.00025 0.005 0 + -0.00025 0.006 0 + -0.00025 0.007 0 + -0.00025 0.008 0 + -0.00025 0.009 0 + -0.00025 0.010 0 + 0.00025 0.000 0 + 0.00025 0.001 0 + 0.00025 0.002 0 + 0.00025 0.003 0 + 0.00025 0.004 0 + 0.00025 0.005 0 + 0.00025 0.006 0 + 0.00025 0.007 0 + 0.00025 0.008 0 + 0.00025 0.009 0 + 0.00025 0.010 0 + +% Element type: 1 = line, 5 = triangular +% ELEM_TYPE ELEM_NODEID_1 ELEM_NODEID_2 ELEM_PROPERTY AUXILIARY_Vector +NELEM= 32 + 1 2 1 1 0 0 + 2 3 1 1 0 0 + 3 4 1 1 0 0 + 4 5 1 1 0 0 + 5 6 1 1 0 0 + 6 7 1 1 0 0 + 7 8 1 1 0 0 + 8 9 1 1 0 0 + 9 10 1 1 0 0 + 10 11 1 1 0 0 + 1 12 2 0 1 0 + 2 13 2 0 1 0 + 3 14 2 0 1 0 + 4 15 2 0 1 0 + 5 16 2 0 1 0 + 6 17 2 0 1 0 + 7 18 2 0 1 0 + 8 19 2 0 1 0 + 9 20 2 0 1 0 + 10 21 2 0 1 0 + 11 22 2 0 1 0 + 1 23 2 0 1 0 + 2 24 2 0 1 0 + 3 25 2 0 1 0 + 4 26 2 0 1 0 + 5 27 2 0 1 0 + 6 28 2 0 1 0 + 7 29 2 0 1 0 + 8 30 2 0 1 0 + 9 31 2 0 1 0 + 10 32 2 0 1 0 + 11 33 2 0 1 0 + +% NODE_ID DOF_ID (NOTE: NODE_ID starts from 1 and DOF_ID is 1-6) +NCONSTR = 6 +1 1 +1 2 +1 3 +1 4 +1 5 +1 6 + +% MASTER_NODE SLAVE_NODE ( +NRBE2 = 0 diff --git a/TestCases/pybeam_fsi/adjoint/property.pyBeam b/TestCases/pybeam_fsi/adjoint/property.pyBeam new file mode 100644 index 000000000000..821ec3d865fb --- /dev/null +++ b/TestCases/pybeam_fsi/adjoint/property.pyBeam @@ -0,0 +1,14 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% Case description: FSI adjoint problem SU2-pyBeam % +% Author: Rocco Bombardieri % +% Institution: Universidad Carlos III Madrid % +% Date: 30/08/2019 % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% Respect the format, properties have to start in line following NPROP +% Format: AREA Iyy Izz Jt +NPROP = 2 +0.0005 4.16666666666667E-05 1.04166666666667E-11 4.16666770833333E-05 +0.0050 4.16666666666667E-05 1.04166666666667E-11 4.16666770833333E-05 diff --git a/TestCases/pybeam_fsi/adjoint/solution.pyBeam b/TestCases/pybeam_fsi/adjoint/solution.pyBeam new file mode 100644 index 000000000000..fb3007d1347c --- /dev/null +++ b/TestCases/pybeam_fsi/adjoint/solution.pyBeam @@ -0,0 +1,34 @@ +Node ID U 1 U 2 U 3 U 4 U 5 U 6 // +1 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 +2 8.89440276884879169e-05 -3.95618489934994222e-06 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 
-1.71602360991139291e-01 +3 3.28527763648690189e-04 -3.29528101358207384e-05 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -3.06493777408012980e-01 +4 6.81301993042742480e-04 -9.70374200076978914e-05 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -4.09526914484095061e-01 +5 1.11598601280878357e-03 -1.96200378323702469e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -4.85515179663029928e-01 +6 1.60778837762034403e-03 -3.25220488527650658e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.39218955547780099e-01 +7 2.13786497111382234e-03 -4.77000129509082434e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.75327711138957087e-01 +8 2.69238506103191286e-03 -6.44573038814034950e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.98047101289778116e-01 +9 3.26136063036131614e-03 -8.21986375925958575e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -6.10800037661763273e-01 +10 3.83768434133515942e-03 -1.00457924115847277e-03 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -6.16485173129958652e-01 +11 4.41667848140825294e-03 -1.18914091806341933e-03 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -6.17907455523268423e-01 +12 7.48122266047275233e-10 -9.83277224294213400e-10 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 5.89969321240683667e-06 +13 9.26166804013387954e-05 3.87341172179890507e-05 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -1.71602861477094609e-01 +14 3.40179337593107085e-04 4.24771728398866155e-05 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -3.06498640679549239e-01 +15 7.01975801807187241e-04 2.50737313626729510e-06 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -4.09534802577013912e-01 +16 1.14487881830260645e-03 -7.95330842416569706e-05 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -4.85526072664893116e-01 +17 1.64326257950200968e-03 -1.96852545800038783e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.39231708441125224e-01 +18 2.17811308554286161e-03 -3.40971290291998852e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.75340037616694167e-01 +19 2.73577754442190643e-03 -5.03814066027363154e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.98060526686583493e-01 +20 3.30656560892204606e-03 -6.78603084767575942e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -6.10820543272860284e-01 +21 3.88370897259809108e-03 -8.60032340393220929e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -6.16517876974919998e-01 +22 4.46290995977189665e-03 -1.04430257359832291e-03 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -6.17951059481785525e-01 +23 7.41672888120299199e-10 -9.87828985932742111e-10 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.92699188932144464e-06 +24 8.52728756142194836e-05 -4.66465988808328173e-05 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -1.71601984585993933e-01 +25 3.16877978290830691e-04 -1.08381700755134005e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -3.06489154226860849e-01 +26 6.60630417143066883e-04 -1.96580392130054086e-04 0.00000000000000000e+00 
0.00000000000000000e+00 0.00000000000000000e+00 -4.09519445473651544e-01 +27 1.08709608776144792e-03 -3.12865213470571305e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -4.85504958837879086e-01 +28 1.57231749844052946e-03 -4.53585627828285510e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.39207038008002071e-01 +29 2.09762003776210780e-03 -6.13026283232274895e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.75316133460538670e-01 +30 2.64899585948754441e-03 -7.85328947545858576e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -5.98034246615223974e-01 +31 3.21616028794251057e-03 -9.65364666650571249e-04 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -6.10780011812895762e-01 +32 3.79166676437984974e-03 -1.14911787634797212e-03 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -6.16452941309688640e-01 +33 4.37045617853931093e-03 -1.33396804736337564e-03 0.00000000000000000e+00 0.00000000000000000e+00 0.00000000000000000e+00 -6.17864317208002722e-01 diff --git a/TestCases/pybeam_fsi/primal/config.cfg b/TestCases/pybeam_fsi/primal/config.cfg new file mode 100644 index 000000000000..aa755ad9e8c4 --- /dev/null +++ b/TestCases/pybeam_fsi/primal/config.cfg @@ -0,0 +1,20 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% Case description: FSI primal problem SU2-pyBeam % +% Author: Ruben Sanchez, Rocco Bombardieri % +% Institution: TU Kaiserslautern, Universidad Carlos III Madrid % +% Date: 30/08/2019 % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% ------------- CFD & CSD CONFIGURATION FILES ------------% + +SU2_CONFIG = configFlow.cfg +PYBEAM_CONFIG = configBeam.cfg +MLS_CONFIG_FILE_NAME = configMLS.cfg + +NDIM = 2 + +NB_FSI_ITER = 3 +FSI_TOLERANCE = 1e-7 +RELAX_PARAM = 0.6 diff --git a/TestCases/pybeam_fsi/primal/configBeam.cfg b/TestCases/pybeam_fsi/primal/configBeam.cfg new file mode 100644 index 000000000000..0d3f4ac1a1a6 --- /dev/null +++ b/TestCases/pybeam_fsi/primal/configBeam.cfg @@ -0,0 +1,39 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% Case description: FSI primal problem SU2-pyBeam % +% Author: Rocco Bombardieri, Ruben Sanchez % +% Institution: Universidad Carlos III Madrid, TU Kaiserslautern % +% Date: 30/08/2019 % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% Young modulus [GPa] +Y_MODULUS = 40000 + +% Poisson Ratio +POISSON = 0.3 + +% Beam Density [kg/m^3] +RHO = 2700 + +% Number of loadsteps used in the analysis (DEFAULT = 1) +LOAD_STEPS = 1 + +% Number of iterations used in the analysis (DEFAULT = 1) +N_STRUCT_ITER =20 + +% Convergence rate criterium (DEFAULT = 1e-4) +CONV_CRITERIUM = 1e-7 + +% +% Tolerance of the linear solver +TOLERANCE_LINSOL = 1e-2 +% +% Kind of linear solver (FullPivHouseholderQr, PartialPivLu, FullPivLu, HouseholderQr, ColPivHouseholderQr, LLT, LDLT) +KIND_LINSOL = FullPivLu + +% MESH file +MESH_FILE = mesh.pyBeam + +% PROPERTY file +PROPERTY_FILE = property.pyBeam diff --git a/TestCases/pybeam_fsi/primal/configFlow.cfg b/TestCases/pybeam_fsi/primal/configFlow.cfg new file mode 100644 index 000000000000..672cba499e7f --- /dev/null +++ b/TestCases/pybeam_fsi/primal/configFlow.cfg @@ -0,0 +1,153 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% Case description: FSI primal problem 
SU2-pyBeam % +% Author: Ruben Sanchez, Rocco Bombardieri % +% Institution: TU Kaiserslautern, Universidad Carlos III Madrid % +% Date: 30/08/2019 % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% ------------- DIRECT, ADJOINT, AND LINEARIZED PROBLEM DEFINITION ------------% +% +% Physical governing equations (EULER, NAVIER_STOKES, +% TNE2_EULER, TNE2_NAVIER_STOKES, +% WAVE_EQUATION, HEAT_EQUATION, LINEAR_ELASTICITY, +% POISSON_EQUATION) +SOLVER= INC_NAVIER_STOKES +KIND_TURB_MODEL= NONE +MATH_PROBLEM= DIRECT + +RESTART_SOL= NO +UNST_RESTART_ITER = 1 + +SCREEN_OUTPUT= (INNER_ITER, RMS_PRESSURE, RMS_VELOCITY-X, RMS_VELOCITY-Y, DRAG) + + +% ---------------- INCOMPRESSIBLE FLOW CONDITION DEFINITION -------------------% + +INNER_ITER = 200 + +INC_DENSITY_MODEL= CONSTANT + +INC_ENERGY_EQUATION = NO +INC_DENSITY_INIT= 1.0 +INC_VELOCITY_INIT= ( 1.0, 0.0, 0.0 ) + +INC_TEMPERATURE_INIT= 300 + +INC_NONDIM= INITIAL_VALUES + +INC_DENSITY_REF= 1.0 +INC_VELOCITY_REF= 1.0 +INC_TEMPERATURE_REF = 1.0 + +% --------------------------- VISCOSITY MODEL ---------------------------------% + +VISCOSITY_MODEL= CONSTANT_VISCOSITY +MU_CONSTANT= 0.001 + +% ---------------------- REFERENCE VALUE DEFINITION ---------------------------% + +REF_ORIGIN_MOMENT_X = 0.00 +REF_ORIGIN_MOMENT_Y = 0.00 +REF_ORIGIN_MOMENT_Z = 0.00 + +REF_LENGTH= 0.01 +REF_AREA= 0.01 + +% -------------------- BOUNDARY CONDITION DEFINITION --------------------------% + +MARKER_HEATFLUX= ( wallF, 0.0 ) + +INC_INLET_TYPE= VELOCITY_INLET +INC_OUTLET_TYPE= PRESSURE_OUTLET + +MARKER_INLET= ( inlet, 0.0, 1.0, 1.0, 0.0, 0.0 ) +MARKER_OUTLET= ( outlet, 0.0 ) +MARKER_EULER= ( upper, lower ) + +MARKER_PLOTTING= ( wallF ) +MARKER_MONITORING= ( wallF ) + +MARKER_DEFORM_MESH= ( wallF ) +MARKER_FLUID_LOAD= ( wallF ) + +% ------------- COMMON PARAMETERS DEFINING THE NUMERICAL METHOD ---------------% + +NUM_METHOD_GRAD= WEIGHTED_LEAST_SQUARES +CFL_NUMBER= 1000 + +CFL_ADAPT= NO +CFL_ADAPT_PARAM= ( 1.5, 0.5, 1.0, 100.0 ) + +RK_ALPHA_COEFF= ( 0.66667, 0.66667, 1.000000 ) + +% ------------------------ LINEAR SOLVER DEFINITION ---------------------------% + +LINEAR_SOLVER= FGMRES +LINEAR_SOLVER_PREC= ILU +LINEAR_SOLVER_ERROR= 1E-15 +LINEAR_SOLVER_ITER= 20 + +% --------------------------- MESH PARAMETERS ---------------------------------% + +DEFORM_MESH = YES +DEFORM_STIFFNESS_TYPE = WALL_DISTANCE + +DEFORM_LINEAR_SOLVER = CONJUGATE_GRADIENT +DEFORM_LINEAR_SOLVER_PREC = ILU +DEFORM_LINEAR_SOLVER_ERROR = 1E-8 +DEFORM_LINEAR_SOLVER_ITER = 5000 +DEFORM_CONSOLE_OUTPUT = NO + +% -------------------------- MULTIGRID PARAMETERS -----------------------------% + +MGLEVEL= 0 +% -------------------- FLOW NUMERICAL METHOD DEFINITION -----------------------% + +CONV_NUM_METHOD_FLOW= FDS +MUSCL_FLOW= YES +SLOPE_LIMITER_FLOW= NONE +TIME_DISCRE_FLOW= EULER_IMPLICIT + +% -------------------- TURBULENT NUMERICAL METHOD DEFINITION ------------------% + +CONV_NUM_METHOD_TURB= SCALAR_UPWIND +MUSCL_TURB= NO +SLOPE_LIMITER_TURB= VENKATAKRISHNAN +TIME_DISCRE_TURB= EULER_IMPLICIT + +% --------------------------- CONVERGENCE PARAMETERS --------------------------% + +CONV_FIELD= RMS_PRESSURE +CONV_RESIDUAL_MINVAL= -12 +CONV_STARTITER= 10 +CONV_CAUCHY_ELEMS= 100 +CONV_CAUCHY_EPS= 1E-5 + +% ------------------------- INPUT/OUTPUT INFORMATION --------------------------% + +MESH_FILENAME= mesh.su2 + +MESH_FORMAT= SU2 +MESH_OUT_FILENAME= mesh_out.su2 + +SOLUTION_FILENAME= solution_flow.dat +RESTART_FILENAME= restart_flow.dat +VOLUME_ADJ_FILENAME= flow_adjoint + 
+TABULAR_FORMAT= CSV
+CONV_FILENAME= history
+
+VOLUME_FILENAME= flow
+SURFACE_FILENAME= surface_flow
+
+WRT_SOL_FREQ= 1000
+WRT_SOL_FREQ_DUALTIME= 1
+
+WRT_CON_FREQ= 1
+WRT_CON_FREQ_DUALTIME= 1
+
+
+WRT_BINARY_RESTART=NO
+READ_BINARY_RESTART=NO
diff --git a/TestCases/pybeam_fsi/primal/configMLS.cfg b/TestCases/pybeam_fsi/primal/configMLS.cfg
new file mode 100644
index 000000000000..0bea0f61567c
--- /dev/null
+++ b/TestCases/pybeam_fsi/primal/configMLS.cfg
@@ -0,0 +1,35 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                                                                              %
+% FSI SU2-Moving Least Square configuration file                               %
+% Case description: Aeroelastic (FSI) simulation by external solver coupling   %
+% Author: Rocco Bombardieri                                                    %
+% Institution: Universidad Carlos III Madrid                                   %
+% Date: 30/08/2019                                                             %
+%                                                                              %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% ------------- MLS parameters ------------%
+
+% Polynomial order
+POLY = 2
+
+% Radial basis weight order
+WEIGHT = 4
+
+% Max radius
+RMAX = 0.015
+
+% Number of points for the MLS
+POINTS = 33
+
+%
+DELTA = 1.0
+
+% Pseudo-inverse tolerance for the SVD (Moore-Penrose pseudoinverse)
+TOLL_SVD = 10E-7
+
+% ------------- Debugging options ------------%
+DEBUG = NO
+
+% Mode scale factor
+MAGNIF_FACTOR = 0.01
diff --git a/TestCases/pybeam_fsi/primal/mesh.pyBeam b/TestCases/pybeam_fsi/primal/mesh.pyBeam
new file mode 100644
index 000000000000..119b2b5c6a5c
--- /dev/null
+++ b/TestCases/pybeam_fsi/primal/mesh.pyBeam
@@ -0,0 +1,95 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                                                                              %
+% Case description: FSI primal problem SU2-pyBeam                              %
+% Author: Rocco Bombardieri                                                    %
+% Institution: Universidad Carlos III Madrid                                   %
+% Date: 30/08/2019                                                             %
+%                                                                              %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Respect the format: point coordinates and connectivity must start on the line following NPOIN and NELEM
+
+NDIM = 3
+
+NPOIN= 33
+   0        0.000  0
+   0        0.001  0
+   0        0.002  0
+   0        0.003  0
+   0        0.004  0
+   0        0.005  0
+   0        0.006  0
+   0        0.007  0
+   0        0.008  0
+   0        0.009  0
+   0        0.010  0
+  -0.00025  0.000  0
+  -0.00025  0.001  0
+  -0.00025  0.002  0
+  -0.00025  0.003  0
+  -0.00025  0.004  0
+  -0.00025  0.005  0
+  -0.00025  0.006  0
+  -0.00025  0.007  0
+  -0.00025  0.008  0
+  -0.00025  0.009  0
+  -0.00025  0.010  0
+   0.00025  0.000  0
+   0.00025  0.001  0
+   0.00025  0.002  0
+   0.00025  0.003  0
+   0.00025  0.004  0
+   0.00025  0.005  0
+   0.00025  0.006  0
+   0.00025  0.007  0
+   0.00025  0.008  0
+   0.00025  0.009  0
+   0.00025  0.010  0
+
+% Element type: 1 = line, 5 = triangular
+% ELEM_TYPE ELEM_NODEID_1 ELEM_NODEID_2 ELEM_PROPERTY AUXILIARY_Vector
+NELEM= 32
+  1   2  1  1  0  0
+  2   3  1  1  0  0
+  3   4  1  1  0  0
+  4   5  1  1  0  0
+  5   6  1  1  0  0
+  6   7  1  1  0  0
+  7   8  1  1  0  0
+  8   9  1  1  0  0
+  9  10  1  1  0  0
+ 10  11  1  1  0  0
+  1  12  2  0  1  0
+  2  13  2  0  1  0
+  3  14  2  0  1  0
+  4  15  2  0  1  0
+  5  16  2  0  1  0
+  6  17  2  0  1  0
+  7  18  2  0  1  0
+  8  19  2  0  1  0
+  9  20  2  0  1  0
+ 10  21  2  0  1  0
+ 11  22  2  0  1  0
+  1  23  2  0  1  0
+  2  24  2  0  1  0
+  3  25  2  0  1  0
+  4  26  2  0  1  0
+  5  27  2  0  1  0
+  6  28  2  0  1  0
+  7  29  2  0  1  0
+  8  30  2  0  1  0
+  9  31  2  0  1  0
+ 10  32  2  0  1  0
+ 11  33  2  0  1  0
+
+% NODE_ID DOF_ID (NOTE: NODE_ID starts from 1 and DOF_ID is 1-6)
+NCONSTR = 6
+1 1
+1 2
+1 3
+1 4
+1 5
+1 6
+
+% MASTER_NODE SLAVE_NODE
+NRBE2 = 0
diff --git a/TestCases/pybeam_fsi/primal/mesh.readme b/TestCases/pybeam_fsi/primal/mesh.readme
new file mode 100644
index 000000000000..78105c4dbf5f
--- /dev/null
+++ b/TestCases/pybeam_fsi/primal/mesh.readme
@@ -0,0 +1 @@
+Copy the mesh from the pybeam adjoint example.
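For reference, TOLL_SVD in configMLS.cfg above is the singular-value cutoff applied when the MLS system is pseudo-inverted (the comment names the Moore-Penrose pseudoinverse, which is computed through an SVD). A minimal numpy sketch of what such a cutoff does follows; the matrix A is an illustrative placeholder rather than the actual MLS kernel, and the relative-cutoff convention (numpy's rcond) is an assumption, since the config does not say whether the tolerance is absolute or relative:

# Sketch: Moore-Penrose pseudoinverse with an SVD truncation tolerance,
# illustrating the role of TOLL_SVD in configMLS.cfg (10E-7, i.e. 1.0e-6).
# A is a placeholder for the MLS system matrix, not pyBeam/MLS data.
import numpy as np

def truncated_pinv(A, toll_svd=1.0e-6):
    # Decompose, invert only the singular values above the cutoff, and
    # treat the rest as numerically zero (rank-deficient directions).
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    s_inv = np.where(s > toll_svd * s.max(), 1.0 / s, 0.0)
    return Vt.T @ np.diag(s_inv) @ U.T

A = np.random.rand(33, 6)              # e.g. 33 donor points (POINTS = 33)
A_pinv = truncated_pinv(A)
print(np.allclose(A @ A_pinv @ A, A))  # Moore-Penrose identity: A A+ A = A

The one-liner numpy.linalg.pinv(A, rcond=1.0e-6) implements the same truncation in a single call.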
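The layout comment in mesh.pyBeam above is strict: point coordinates must begin on the line immediately following NPOIN, and element rows on the line immediately following NELEM. A minimal reader for that layout is sketched below; it is illustrative only, not the parser pyBeam actually ships, and it strips blank and %-comment lines up front, so the NCONSTR and NRBE2 blocks simply pass through unparsed:

# Sketch of a reader for the mesh.pyBeam layout shown above: coordinates
# start right after "NPOIN=", element rows right after "NELEM=".
def read_pybeam_mesh(filename):
    with open(filename) as f:
        # Drop blank lines and %-comments so only data lines remain.
        lines = [l.strip() for l in f if l.strip() and not l.strip().startswith('%')]
    points, elems = [], []
    i = 0
    while i < len(lines):
        if lines[i].startswith('NPOIN'):
            npoin = int(lines[i].split('=')[1])
            points = [tuple(map(float, lines[i + 1 + k].split())) for k in range(npoin)]
            i += npoin
        elif lines[i].startswith('NELEM'):
            nelem = int(lines[i].split('=')[1])
            elems = [tuple(map(int, lines[i + 1 + k].split())) for k in range(nelem)]
            i += nelem
        i += 1
    return points, elems

points, elems = read_pybeam_mesh('mesh.pyBeam')
print(len(points), len(elems))   # 33 32 for the mesh above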
diff --git a/TestCases/pybeam_fsi/primal/property.pyBeam b/TestCases/pybeam_fsi/primal/property.pyBeam
new file mode 100644
index 000000000000..14fa7a959b22
--- /dev/null
+++ b/TestCases/pybeam_fsi/primal/property.pyBeam
@@ -0,0 +1,14 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%                                                                              %
+% Case description: FSI primal problem SU2-pyBeam                              %
+% Author: Rocco Bombardieri                                                    %
+% Institution: Universidad Carlos III Madrid                                   %
+% Date: 30/08/2019                                                             %
+%                                                                              %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Respect the format: properties must start on the line following NPROP
+% Format: AREA Iyy Izz Jt
+NPROP = 2
+0.0005 4.16666666666667E-05 1.04166666666667E-11 4.16666770833333E-05
+0.0050 4.16666666666667E-05 1.04166666666667E-11 4.16666770833333E-05
diff --git a/meson.build b/meson.build
index 0fa3795ec60a..adfd5086ceae 100644
--- a/meson.build
+++ b/meson.build
@@ -186,6 +186,10 @@ if get_option('enable-pywrapper')
   subdir('SU2_PY/pySU2')
 endif
 
+if get_option('enable-pybeam')
+  subproject('pyBeam')
+endif
+
 message('''-------------------------------------------------------------------------
          |    ___ _   _ ___                                                      |
          |   / __| | | |_  )   Release 7.0.8 "Blackbird"                         |
@@ -204,7 +208,8 @@ message('''-------------------------------------------------------------------------
   Intel-MKL:     @7@
   OpenBlas:      @8@
   PaStiX:        @9@
-  Mixed Float:   @10@
+  pyBeam:        @10@
+  Mixed Float:   @11@
 
   Please be sure to add the $SU2_HOME and $SU2_RUN environment variables,
   and update your $PATH (and $PYTHONPATH if applicable) with $SU2_RUN
@@ -216,8 +221,9 @@ message('''-------------------------------------------------------------------------
   export PATH=$PATH:$SU2_RUN
   export PYTHONPATH=$PYTHONPATH:$SU2_RUN
 
-  Use './ninja -C @11@ install' to compile and install SU2
+  Use './ninja -C @12@ install' to compile and install SU2
 '''.format(get_option('prefix')+'/bin', meson.source_root(), get_option('enable-tecio'),
    get_option('enable-cgns'), get_option('enable-autodiff'), get_option('enable-directdiff'),
    get_option('enable-pywrapper'), get_option('enable-mkl'),
-   get_option('enable-openblas'), get_option('enable-pastix'), get_option('enable-mixedprec'), meson.build_root().split('/')[-1]))
+   get_option('enable-openblas'), get_option('enable-pastix'), get_option('enable-pybeam'), get_option('enable-mixedprec'),
+   meson.build_root().split('/')[-1]))
diff --git a/meson_options.txt b/meson_options.txt
index 79ff09343d3c..c614530702d5 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -13,6 +13,7 @@ option('blas-name', type : 'string', value : 'openblas', description: 'name of t
 option('enable-pastix', type : 'boolean', value : false, description: 'enable PaStiX support')
 option('pastix_root', type : 'string', value : 'externals/pastix/', description: 'PaStiX base directory')
 option('scotch_root', type : 'string', value : 'externals/scotch/', description: 'Scotch base directory')
+option('enable-pybeam', type : 'boolean', value : false, description: 'enable pyBeam solver')
 option('custom-mpi', type : 'boolean', value : false, description: 'enable MPI assuming the compiler and/or env vars give the correct include dirs and linker args.')
 option('enable-tests', type : 'boolean', value : false, description: 'compile Unit Tests')
 option('enable-mixedprec', type : 'boolean', value : false, description: 'use single precision floating point arithmetic for sparse algebra')
diff --git a/meson_scripts/init.py b/meson_scripts/init.py
index b284ad498e67..e00d93189e95 100755
--- a/meson_scripts/init.py
+++ b/meson_scripts/init.py
@@ -52,6 +52,8 @@ def init_submodules(method = 'auto'):
   github_repo_meson = 'https://github.com/mesonbuild/meson'
   sha_version_ninja = '52649de2c56b63f42bc59513d51286531c595b44'
   github_repo_ninja = 'https://github.com/ninja-build/ninja'
+  sha_version_pybeam = '358f9e0fd768f2f7c2f524953f30c0d675866cce'
+  github_repo_pybeam = 'https://github.com/pyBeam/pyBeam'
 
   medi_name = 'MeDiPack'
   codi_name = 'CoDiPack'
@@ -62,6 +64,8 @@ def init_submodules(method = 'auto'):
   alt_name_codi = base_path + 'codi'
   alt_name_meson = base_path + 'meson'
   alt_name_ninja = base_path + 'ninja'
+  subproj_path = cur_dir + os.path.sep + 'subprojects' + os.path.sep
+  alt_name_pybeam = subproj_path + 'pyBeam'
 
   if method == 'auto':
     is_git = is_git_directory(cur_dir)
@@ -80,6 +84,7 @@ def init_submodules(method = 'auto'):
     submodule_status(alt_name_medi, sha_version_medi)
     submodule_status(alt_name_meson, sha_version_meson)
     submodule_status(alt_name_ninja, sha_version_ninja)
+    submodule_status(alt_name_pybeam, sha_version_pybeam)
   # Otherwise download the zip file from git
   else:
     download_module(codi_name, alt_name_codi, github_repo_codi, sha_version_codi)
@@ -116,11 +121,11 @@ def submodule_status(path, sha_commit):
     # Write a warning that the sha tags do not match
     sys.stderr.write('WARNING: the currently checked out submodule commit in ' + path + ' does not match the SHA-1 found in the index.\n')
-    sys.stderr.write('Use \'git submodule update --init '+ path + '\' to reset the module if necessary.\n')
+    sys.stderr.write('Use \'git submodule update --init --recursive '+ path + '\' to reset the module if necessary.\n')
 
   elif status_indicator == '-':
     # Initialize the submodule if necessary
     print('Initialize submodule ' + path + ' using git ... ')
-    subprocess.run(['git', 'submodule', 'update', '--init', path], check = True, cwd = sys.path[0])
+    subprocess.run(['git', 'submodule', 'update', '--init', '--recursive', path], check = True, cwd = sys.path[0])
 
   # Check that the SHA tag stored in this file matches the one stored in the git index
   cur_sha_commit = status[1:].split(' ')[0]
diff --git a/subprojects/pyBeam b/subprojects/pyBeam
new file mode 160000
index 000000000000..358f9e0fd768
--- /dev/null
+++ b/subprojects/pyBeam
@@ -0,0 +1 @@
+Subproject commit 358f9e0fd768f2f7c2f524953f30c0d675866cce
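The init.py hunks above apply to pyBeam the same pinning scheme used for the other submodules: store a SHA-1, compare it against the output of git submodule status, and (re-)initialize when the checkout is missing or stale, now with --recursive in case the subproject carries nested submodules of its own. Condensed to its core, the pattern looks like this (an illustrative sketch; the download fallback and the exact warning wording of init.py are omitted):

# Condensed sketch of the pinned-submodule check in meson_scripts/init.py.
# The path and SHA are the ones added by this diff; everything else is
# stripped for clarity.
import subprocess
import sys

def ensure_pinned_submodule(path, sha_commit):
    status = subprocess.run(['git', 'submodule', 'status', path],
                            capture_output=True, text=True,
                            cwd=sys.path[0], check=True).stdout
    if status.startswith('-'):
        # '-' marks an uninitialized submodule: fetch it (and any nested ones).
        subprocess.run(['git', 'submodule', 'update', '--init', '--recursive', path],
                       check=True, cwd=sys.path[0])
    elif status[1:].split(' ')[0] != sha_commit:
        # Same SHA comparison init.py performs on the status line.
        sys.stderr.write('WARNING: ' + path + ' is not at the pinned commit '
                         + sha_commit + '\n')

ensure_pinned_submodule('subprojects/pyBeam',
                        '358f9e0fd768f2f7c2f524953f30c0d675866cce')

With the subproject in place, configuring the build with the new boolean option from meson_options.txt (e.g. passing -Denable-pybeam=true at meson configure time) makes the new meson.build branch call subproject('pyBeam').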