From 01bb85b8f9ef6f76e0a7a2757e87ebdde8a814cc Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Mon, 12 Sep 2022 16:03:56 +0200 Subject: [PATCH 01/84] Added MLP classes to numerics folder --- .../numerics/multilayer_perceptron/IOMap.hpp | 80 +++++ .../numerics/multilayer_perceptron/Layer.hpp | 63 ++++ .../multilayer_perceptron/LookUp_ANN.hpp | 131 +++++++ .../multilayer_perceptron/NeuralNetwork.hpp | 174 +++++++++ .../numerics/multilayer_perceptron/Neuron.hpp | 74 ++++ .../ReadNeuralNetwork.hpp | 74 ++++ SU2_CFD/obj/Makefile.am | 6 + SU2_CFD/src/meson.build | 8 +- .../numerics/multilayer_perceptron/IOMap.cpp | 60 ++++ .../numerics/multilayer_perceptron/Layer.cpp | 62 ++++ .../multilayer_perceptron/LookUp_ANN.cpp | 268 ++++++++++++++ .../multilayer_perceptron/NeuralNetwork.cpp | 332 ++++++++++++++++++ .../numerics/multilayer_perceptron/Neuron.cpp | 114 ++++++ .../ReadNeuralNetwork.cpp | 151 ++++++++ 14 files changed, 1596 insertions(+), 1 deletion(-) create mode 100644 SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp create mode 100644 SU2_CFD/include/numerics/multilayer_perceptron/Layer.hpp create mode 100644 SU2_CFD/include/numerics/multilayer_perceptron/LookUp_ANN.hpp create mode 100644 SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp create mode 100644 SU2_CFD/include/numerics/multilayer_perceptron/Neuron.hpp create mode 100644 SU2_CFD/include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp create mode 100644 SU2_CFD/src/numerics/multilayer_perceptron/IOMap.cpp create mode 100644 SU2_CFD/src/numerics/multilayer_perceptron/Layer.cpp create mode 100644 SU2_CFD/src/numerics/multilayer_perceptron/LookUp_ANN.cpp create mode 100644 SU2_CFD/src/numerics/multilayer_perceptron/NeuralNetwork.cpp create mode 100644 SU2_CFD/src/numerics/multilayer_perceptron/Neuron.cpp create mode 100644 SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp 
b/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp new file mode 100644 index 000000000000..66878762edbe --- /dev/null +++ b/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp @@ -0,0 +1,80 @@ +/*! + * \file IOMap.hpp + * \brief Declaration of input-to-output mapping class for artifical neural networks + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . 
+ */ +#pragma once + +#include +#include +#include +#include +#include +#include +#include "../../../../Common/include/CConfig.hpp" +#include "../../../../Common/include/linear_algebra/blas_structure.hpp" +#include "LookUp_ANN.hpp" + +using namespace std; +class LookUp_ANN; + +class IOMap +{ + private: + vector ANN_indices; + vector>> Input_Map; + vector>> Output_Map; + vector> input_indices; + public: + IOMap(){}; + IOMap(LookUp_ANN*ANN_collection, vector &inputs, vector &outputs); + ~IOMap(){}; + void Push_ANN_index(size_t i_ANN){ANN_indices.push_back(i_ANN);}; + void PairVariableswithANNs(LookUp_ANN * ANN_collection, vector &inputs, vector &outputs); + size_t GetNANNs(){return ANN_indices.size();} + size_t GetANNIndex(size_t i_Map){return ANN_indices[i_Map];} + size_t GetInputIndex(size_t i_Map, size_t iInput){return Input_Map[i_Map][iInput].first;} + size_t GetOutputIndex(size_t i_Map, size_t iOutput){return Output_Map[i_Map][iOutput].first;} + vector> GetOutputMapping(size_t i_map){return Output_Map[i_map];} + vector> GetInputMapping(size_t i_map){return Input_Map[i_map];} + + vector GetANN_Inputs(size_t i_Map, vector&inputs){ + vector ANN_input; + ANN_input.resize(Input_Map[i_Map].size()); + + for(size_t iInput=0; iInput GetANN_Outputs(size_t i_Map, vector&outputs){ + vector ANN_output; + ANN_output.resize(Output_Map[i_Map].size()); + + for(size_t iOutput=0; iOutput. 
+ */ +#pragma once + +#include +#include +#include +#include + +#include "../../../../Common/include/CConfig.hpp" +#include "Neuron.hpp" +#include "../../../../Common/include/linear_algebra/blas_structure.hpp" +using namespace std; + +class Layer +{ +private: + unsigned long number_of_neurons; + Neuron * neurons; + bool input; + string activation_type; +public: + Layer(); + Layer(unsigned long n_neurons); + void setNNeurons(unsigned long n_neurons); + unsigned long getNNeurons(){return number_of_neurons;}; + void sayhi(); + void setInput(bool def){input = def;}; + bool isInput(){return input;}; + void setValue(size_t i_neuron, su2double value){neurons[i_neuron].setValue(value);} + su2double getValue(size_t i_neuron){return neurons[i_neuron].getValue();} + void setBias(size_t i_neuron, su2double value){neurons[i_neuron].setBias(value);} + su2double getBias(size_t i_neuron){return neurons[i_neuron].getBias();} + su2double getdYdX(size_t i_neuron){return neurons[i_neuron].getGradient();} + string getActivationType(){return activation_type;} + ~Layer(){ + delete [] neurons; + }; +}; diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/LookUp_ANN.hpp b/SU2_CFD/include/numerics/multilayer_perceptron/LookUp_ANN.hpp new file mode 100644 index 000000000000..9cab1a9204c1 --- /dev/null +++ b/SU2_CFD/include/numerics/multilayer_perceptron/LookUp_ANN.hpp @@ -0,0 +1,131 @@ +/*! + * \file LookUp_ANN.hpp + * \brief Declaration of artificial neural network interpolation class + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. 
AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include "../../../../Common/include/CConfig.hpp" +#include "../../../../Common/include/linear_algebra/blas_structure.hpp" +#include "NeuralNetwork.hpp" +#include "IOMap.hpp" + +using namespace std; +class IOMap; + +class LookUp_ANN +{ + private: + int rank{0}; + vector NeuralNetworks; /*!< Vector containing all loaded neural networks. */ + + vector ANN_filenames; /*!< Filenames containing ANN architecture information. */ + + unsigned long number_of_variables; /*!< Number of loaded ANNs. */ + + /*! + * \brief Skip to certain flag in file. + * \param[in] file_stream - input stream of file + * \param[in] flag - line to search for + * \return line in file. + */ + string SkipToFlag(ifstream *file_stream, string flag); + + /*! + * \brief Load ANN architecture + * \param[in] ANN - pointer to target NeuralNetwork class + * \param[in] filename - filename containing ANN architecture information + */ + void GenerateANN(NeuralNetwork*ANN, string filename); + + /*! + * \brief Read ANN architecture input file + * \param[in] filename - filename containing ANN architecture information + */ + void ReadANNInputFile(string fileName); + + public: + + /*! 
+ * \brief ANN collection class constructor + * \param[in] filename - filename containing list of ANN input files + */ + LookUp_ANN(string fileName); + + /*! + * \brief Evaluate loaded ANNs for given inputs and outputs + * \param[in] input_output_map - input-output map coupling desired inputs and outputs to loaded ANNs + * \param[in] inputs - input values + * \param[in] outputs - pointers to output variables + */ + void Predict_ANN(IOMap *input_output_map, vector &inputs, vector &outputs); + + ~LookUp_ANN(){for(size_t i_ANN=0; i_ANN &output_names, IOMap *input_output_map) const; + + /*! + * \brief Check if all output variables are present in the loaded ANNs + * \param[in] output_names - output variable names to check + * \param[in] input_output_map - pointer to input-output map to be checked + */ + bool Check_Use_of_Outputs(vector &output_names, IOMap *input_output_map) const; + + /*! + * \brief Check if all input variables are present in the loaded ANNs + * \param[in] input_names - input variable names to check + * \param[in] input_output_map - pointer to input-output map to be checked + */ + bool Check_Use_of_Inputs(vector &input_names, IOMap *input_output_map) const; + + /*! + * \brief Map variable names to ANN inputs or outputs + * \param[in] i_ANN - loaded ANN index + * \param[in] variable_names - variable names to map to ANN inputs or outputs + * \param[in] input - map to inputs (true) or outputs (false) + */ + vector> FindVariable_Indices(size_t i_ANN, vector variable_names, bool input) const; + +}; + diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp b/SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp new file mode 100644 index 000000000000..682d673f5a6a --- /dev/null +++ b/SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp @@ -0,0 +1,174 @@ +/*! + * \file NeuralNetwork.hpp + * \brief Declaration of the neural network class + * \author E. 
Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "../../../../Common/include/CConfig.hpp" +#include "../../../../Common/include/linear_algebra/blas_structure.hpp" +#include "Layer.hpp" + +using namespace std; +class Activation_Function { + private: + public: + Activation_Function(){}; + ~Activation_Function(){}; + inline virtual su2double run_activation_function(su2double x) {return 0.0;} +}; + +class Linear : public Activation_Function { + private: + public: + Linear() : Activation_Function() {}; + ~Linear(){}; + su2double run_activation_function(su2double x) final {return x;} +}; + +class Relu : public Activation_Function { + private: + public: + Relu() : Activation_Function() {}; + ~Relu(){}; + su2double run_activation_function(su2double x) final {return max(0.0, x);} +}; + +class SmoothSlope : public Activation_Function { + private: + su2double tanhx; + public: + SmoothSlope() : Activation_Function() {}; + ~SmoothSlope(){}; + su2double run_activation_function(su2double x) final {tanhx = tanh(x); return max(tanhx, x);} +}; + +class None : public Activation_Function { 
+ private: + public: + None() : Activation_Function() {}; + ~None(){}; + su2double run_activation_function(su2double x) final {return 0.0;} +}; + +class NeuralNetwork +{ +private: + vector input_names; + vector output_names; + + unsigned long n_hidden_layers; + Layer *inputLayer; + Layer *outputLayer; + vector< Layer *> hiddenLayers; + vectortotal_layers; + + vector>> weights; + vector weights_mat; + vector> values; + + vector> input_norm; + vector> output_norm; + vector last_inputs; + + enum ENUM_ACTIVATION_FUNCTION { + NONE=0, + LINEAR=1, + RELU=2, + SMOOTH_SLOPE=3, + ELU = 4 + }; + ENUM_ACTIVATION_FUNCTION * activation_function_types; + vector activation_functions; + Activation_Function* current_activation_function; + //Activation_Function *activation_functions; +public: + NeuralNetwork(); + ~NeuralNetwork(){ + for(size_t i=0; isetBias(i_neuron, value);} + void setActivationFunction(unsigned long i_layer, string input); + void displayNetwork(); + void sizeWeights(); + void sizeInputs(unsigned long n_inputs){last_inputs.resize(n_inputs); for(unsigned long iInput=0; iInputgetNNeurons();} + unsigned long getNNeurons(unsigned long iLayer, unsigned long iNeuron){return weights.at(iLayer).at(iNeuron).size();} + + // su2double predict(su2double * X, size_t i_output); + // void predict(vector &inputs, vector &outputs); + void predict(vector &inputs, vector &outputs, su2double**doutputs_dinputs=nullptr); + + //void predict_new(vector &inputs, vector &outputs, su2double**doutputs_dinputs=nullptr); + + void SetInputNorm(unsigned long iInput, su2double input_min, su2double input_max){input_norm.at(iInput) = make_pair(input_min, input_max);} + void SetOutputNorm(unsigned long iOutput, su2double output_min, su2double output_max){output_norm.at(iOutput) = make_pair(output_min, output_max);} + + void PushOutputName(string input){output_names.push_back(input);} + void PushInputName(string input){input_names.push_back(input);} + + string GetInputName(size_t iInput){return 
input_names[iInput];} + string GetOutputName(size_t iOutput){return output_names[iOutput];} + + size_t GetnInputs(){return input_names.size();} + size_t GetnOutputs(){return output_names.size();} + + void SizeActivationFunctions(unsigned long n_layers){activation_function_types = new ENUM_ACTIVATION_FUNCTION[n_layers]; activation_functions.resize(4); + activation_functions[ENUM_ACTIVATION_FUNCTION::LINEAR] = new Linear(); + activation_functions[ENUM_ACTIVATION_FUNCTION::RELU] = new Relu(); + activation_functions[ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE] = new SmoothSlope(); + } + + su2double ComputeX(size_t iLayer, size_t iNeuron){ + su2double x; + x = total_layers[iLayer]->getBias(iNeuron); + size_t nNeurons_previous = total_layers[iLayer - 1]->getNNeurons(); + for(size_t jNeuron=0; jNeurongetValue(jNeuron); + } + return x; + } + // su2double run_activation_function(unsigned long iLayer, su2double x){return activation_functions[iLayer]->run_activation_function(x);} + //void SetActivationFunction(size_t iLayer, unsigned long function_index){activation_functions[iLayer] = ENUM_ACTIVATION_FUNCTION(function_index);} +}; + + + diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/Neuron.hpp b/SU2_CFD/include/numerics/multilayer_perceptron/Neuron.hpp new file mode 100644 index 000000000000..12e1012b09a1 --- /dev/null +++ b/SU2_CFD/include/numerics/multilayer_perceptron/Neuron.hpp @@ -0,0 +1,74 @@ +/*! + * \file LookUp_ANN.hpp + * \brief Declaration of artificial neural network perceptron class + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. 
AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . + */ +#pragma once + +#include +#include +#include +#include + +#include "../../../../Common/include/CConfig.hpp" +#include "../../../../Common/include/linear_algebra/blas_structure.hpp" + +using namespace std; +class Neuron +{ +private: + string activation_type="SmoothSlope"; + unsigned long ActivationFunction{ENUM_ACTIVATION_FUNCTION::LINEAR}; + unsigned long i_neuron; + double value; + su2double dy_dx; + double bias{0}; + enum ENUM_ACTIVATION_FUNCTION { + NONE=0, + LINEAR=1, + RELU=2, + SMOOTH_SLOPE=3 + }; + +public: + Neuron(){}; + void setNumber(unsigned long input){i_neuron = input;} + unsigned long getNumber(){return i_neuron;} + //void setFunctionType(string input); + + //string getFunctionType(){return activation_type;} + //su2double activation_function(su2double x); + void setValue(su2double input){value = input;} + su2double getValue(){return value;} + + void setBias(su2double input){bias = input;} + su2double getBias(){return bias;} + + su2double getGradient(){return dy_dx;} + void setGradient(su2double input){dy_dx = input;} + //void SetActivationFunction(unsigned long input){ActivationFunction = input;} + + ~Neuron(){}; +}; + diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp b/SU2_CFD/include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp new file mode 100644 index 
000000000000..e33d267cbe20 --- /dev/null +++ b/SU2_CFD/include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp @@ -0,0 +1,74 @@ +/*! + * \file ReadNeuralNetwork.hpp + * \brief Declaration of MLP input file reader class + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . 
+ */ +#pragma once + +#include +#include +#include +#include + +#include "../../../../Common/include/CConfig.hpp" +#include "../../../../Common/include/linear_algebra/blas_structure.hpp" + +using namespace std; +class ReadNeuralNetwork { + private: + vector input_names; + vector output_names; + + string filename; + unsigned long n_layers; + vector n_neurons; + vector>> weights; + vector> biases; + vector activation_functions; + + vector> input_norm; + vector> output_norm; + public: + + ReadNeuralNetwork(string filename_in); + void ReadMLPFile(); + + string SkipToFlag(ifstream *file_stream, string flag); + + unsigned long GetNInputs(){return n_neurons.at(0);} + unsigned long GetNOutputs(){return n_neurons.at(n_layers - 1);} + + unsigned long GetNlayers(){return n_layers;} + unsigned long GetNneurons(size_t iLayer){return n_neurons.at(iLayer);} + double long GetWeight(size_t iLayer, size_t iNeuron, size_t jNeuron){return weights.at(iLayer).at(iNeuron).at(jNeuron);} + double long GetBias(size_t iLayer, size_t iNeuron){return biases.at(iLayer).at(iNeuron);} + pair GetInputNorm(size_t iInput){return input_norm.at(iInput);} + pair GetOutputNorm(size_t iOutput){return output_norm.at(iOutput);} + string GetActivationFunction(size_t iLayer){return activation_functions.at(iLayer);} + + string GetInputName(size_t iInput){return input_names.at(iInput);} + string GetOutputName(size_t iOutput){return output_names.at(iOutput);} + + ~ReadNeuralNetwork(){}; +}; \ No newline at end of file diff --git a/SU2_CFD/obj/Makefile.am b/SU2_CFD/obj/Makefile.am index 8d1e64e48cd7..140433ec93a2 100644 --- a/SU2_CFD/obj/Makefile.am +++ b/SU2_CFD/obj/Makefile.am @@ -116,6 +116,12 @@ libSU2Core_sources = ../src/definition_structure.cpp \ ../src/numerics/elasticity/CFEALinearElasticity.cpp \ ../src/numerics/elasticity/CFEANonlinearElasticity.cpp \ ../src/numerics/elasticity/nonlinear_models.cpp \ + ../src/numerics/multilayer_perceptron/LookUp_ANN.cpp \ + 
../src/numerics/multilayer_perceptron/IOMap.cpp \ + ../src/numerics/multilayer_perceptron/NeuralNetwork.cpp \ + ../src/numerics/multilayer_perceptron/Neuron.cpp \ + ../src/numerics/multilayer_perceptron/Layer.cpp \ + ../src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp \ ../include/numerics_simd/CNumericsSIMD.cpp \ ../src/output/filewriter/CCSVFileWriter.cpp \ ../src/output/filewriter/CSTLFileWriter.cpp \ diff --git a/SU2_CFD/src/meson.build b/SU2_CFD/src/meson.build index 691db4e5b2dc..6e730d80ffc0 100644 --- a/SU2_CFD/src/meson.build +++ b/SU2_CFD/src/meson.build @@ -147,7 +147,13 @@ su2_cfd_src += files(['numerics/CNumerics.cpp', 'numerics/elasticity/CFEALinearElasticity.cpp', 'numerics/elasticity/CFEANonlinearElasticity.cpp', 'numerics/elasticity/nonlinear_models.cpp', - 'numerics/CGradSmoothing.cpp']) + 'numerics/CGradSmoothing.cpp', + 'numerics/multilayer_perceptron/LookUp_ANN.cpp', + 'numerics/multilayer_perceptron/IOMap.cpp', + 'numerics/multilayer_perceptron/NeuralNetwork.cpp', + 'numerics/multilayer_perceptron/Layer.cpp', + 'numerics/multilayer_perceptron/Neuron.cpp', + 'numerics/multilayer_perceptron/ReadNeuralNetwork.cpp']) su2_cfd_src += files(['../include/numerics_simd/CNumericsSIMD.cpp']) diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/IOMap.cpp b/SU2_CFD/src/numerics/multilayer_perceptron/IOMap.cpp new file mode 100644 index 000000000000..82d2faf19657 --- /dev/null +++ b/SU2_CFD/src/numerics/multilayer_perceptron/IOMap.cpp @@ -0,0 +1,60 @@ +/*! + * \file IOMap.cpp + * \brief Implementation of the input-output mapping class for the + * use of multi-layer perceptrons in SU2 + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. 
AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . + */ +#include "../../../include/numerics/multilayer_perceptron/LookUp_ANN.hpp" +#include "../../../include/numerics/multilayer_perceptron/IOMap.hpp" +#include +#include +#include +#include + +IOMap::IOMap(LookUp_ANN*ANN_collection, vector &inputs, vector &outputs){ + PairVariableswithANNs(ANN_collection, inputs, outputs); + if(outputs.size() > 0){ + ANN_collection->Check_Use_of_Inputs(inputs, this); + ANN_collection->Check_Use_of_Outputs(outputs, this); + ANN_collection->Check_Duplicate_Outputs(outputs, this); + } +} +void IOMap::PairVariableswithANNs(LookUp_ANN*ANN_collection, vector &inputs, vector &outputs){ + + bool isInput, isOutput,input_match; + for(size_t i_ANN=0; i_ANNGetNANNs(); i_ANN++){ + vector> Input_Indices = ANN_collection->FindVariable_Indices(i_ANN, inputs, true); + isInput = Input_Indices.size() > 0; + if(isInput){ + input_match = true; + vector> Output_Indices = ANN_collection->FindVariable_Indices(i_ANN, outputs, false); + isOutput = Output_Indices.size() > 0; + if(isOutput){ + ANN_indices.push_back(i_ANN); + Input_Map.push_back(Input_Indices); + Output_Map.push_back(Output_Indices); + } + } + } +} \ No newline at end of file diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/Layer.cpp b/SU2_CFD/src/numerics/multilayer_perceptron/Layer.cpp new file mode 100644 index 000000000000..b666e4547d34 --- /dev/null +++ 
b/SU2_CFD/src/numerics/multilayer_perceptron/Layer.cpp @@ -0,0 +1,62 @@ +/*! + * \file Layer.cpp + * \brief Implementation of the Layer class to be used in the NeuralNetwork + * class + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . + */ +#include "../../../include/numerics/multilayer_perceptron/Layer.hpp" +#include +using namespace std; + +Layer::Layer() : Layer(1) {}; + +Layer::Layer(unsigned long n_neurons) : number_of_neurons{n_neurons}, input{false} +{ + neurons = new Neuron[n_neurons]; + for(size_t i=0; i. 
+ */ +#include "../../../include/numerics/multilayer_perceptron/LookUp_ANN.hpp" +#include "../../../include/numerics/multilayer_perceptron/IOMap.hpp" +#include "../../../include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp" +#include +#include +#include +#include + +using namespace std; + +LookUp_ANN::LookUp_ANN(string inputFileName) +{ + + #ifdef HAVE_MPI + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + #endif + if(rank == MASTER_NODE) + cout << "Generating ANN collection" << endl; + + ReadANNInputFile(inputFileName); + for(size_t i_ANN=0; i_ANN> LookUp_ANN::FindVariable_Indices(size_t i_ANN, vector variable_names, bool input) const { + vector> variable_indices; + size_t nVar = input ? NeuralNetworks[i_ANN]->GetnInputs() : NeuralNetworks[i_ANN]->GetnOutputs(); + + for(size_t iVar=0; iVarGetInputName(iVar) : NeuralNetworks[i_ANN]->GetOutputName(iVar); + + if(variable_names.at(jVar).compare(ANN_varname) == 0){ + variable_indices.push_back(make_pair(jVar, iVar)); + //cout << variable_names[jVar] << " : " << jVar << " | " << ANN_varname << " : " << iVar << endl; + } + } + } + return variable_indices; +} + +void LookUp_ANN::Predict_ANN(IOMap *input_output_map, vector& inputs, vector& outputs){ + for(size_t i_map=0; i_mapGetNANNs(); i_map++){ + size_t i_ANN = input_output_map->GetANNIndex(i_map); + vector ANN_inputs = input_output_map->GetANN_Inputs(i_map, inputs); + vector ANN_outputs = input_output_map->GetANN_Outputs(i_map, outputs); + NeuralNetworks[i_ANN]->predict(ANN_inputs, ANN_outputs); + } +} +void LookUp_ANN::GenerateANN(NeuralNetwork * ANN, string fileName) +{ + ReadNeuralNetwork Reader = ReadNeuralNetwork(fileName); + + // Read MLP input file + Reader.ReadMLPFile(); + + // Generate basic layer architectures + ANN->defineInputLayer(Reader.GetNInputs()); + ANN->sizeInputs(Reader.GetNInputs()); + for(size_t iInput=0; iInputPushInputName(Reader.GetInputName(iInput)); + } + for(size_t iLayer=1; iLayerpush_hidden_layer(Reader.GetNneurons(iLayer)); + } + 
ANN->defineOutputLayer(Reader.GetNOutputs()); + for(size_t iOutput=0; iOutputPushOutputName(Reader.GetOutputName(iOutput)); + } + + // Size weights of each layer + ANN->sizeWeights(); + + // Define weights and activation functions + ANN->SizeActivationFunctions(ANN->getNWeightLayers()+1); + for(size_t i_layer = 0; i_layer < ANN->getNWeightLayers(); i_layer++){ + ANN->setActivationFunction(i_layer, Reader.GetActivationFunction(i_layer)); + for(size_t i_neuron=0; i_neuron < ANN->getNNeurons(i_layer); i_neuron++){ + for(size_t j_neuron=0; j_neurongetNNeurons(i_layer, i_neuron); j_neuron++){ + ANN->setWeight(i_layer, i_neuron, j_neuron, Reader.GetWeight(i_layer, i_neuron, j_neuron)); + } + } + + + } + ANN->setActivationFunction(ANN->getNWeightLayers(), Reader.GetActivationFunction(ANN->getNWeightLayers())); + + // Set neuron biases + for(size_t i_layer=0; i_layergetNWeightLayers()+1; i_layer++){ + for(size_t i_neuron=0; i_neurongetNNeurons(i_layer); i_neuron++){ + ANN->setBias(i_layer, i_neuron, Reader.GetBias(i_layer, i_neuron)); + } + } + + // Define input and output layer normalization values + for(unsigned long iInput=0; iInputSetInputNorm(iInput, Reader.GetInputNorm(iInput).first, Reader.GetInputNorm(iInput).second); + } + for(unsigned long iOutput=0; iOutputSetOutputNorm(iOutput, Reader.GetOutputNorm(iOutput).first, Reader.GetOutputNorm(iOutput).second); + } + +} +void LookUp_ANN::ReadANNInputFile(string inputFileName) +{ + ifstream file_stream; + istringstream stream_names_var; + istringstream stream_filenames; + file_stream.open(inputFileName.c_str(), ifstream::in); + + if (!file_stream.is_open()) { + SU2_MPI::Error(string("There is no MLP collection file file called ") + inputFileName, + CURRENT_FUNCTION); + } + + string line; + string word; + + line = SkipToFlag(&file_stream, "[number of MLP files]"); + getline(file_stream, line); + number_of_variables = stoul(line); + NeuralNetworks.resize(number_of_variables); + + line = SkipToFlag(&file_stream, "[MLP file 
names]"); + getline(file_stream, line); + stream_filenames.str(line); + while(stream_filenames){ + stream_filenames >> word; + ANN_filenames.push_back(word); + } + ANN_filenames.pop_back(); +} +string LookUp_ANN::SkipToFlag(ifstream *file_stream, string flag) { + string line; + getline(*file_stream, line); + + while (line.compare(flag) != 0 && !(*file_stream).eof()) { + getline(*file_stream, line); + } + + if ((*file_stream).eof()) + SU2_MPI::Error("Flag not found in file", CURRENT_FUNCTION); + + return line; +} + + +bool LookUp_ANN::Check_Duplicate_Outputs(vector &output_names, IOMap *input_output_map) const { + unsigned short n_occurances; + bool duplicate{false}; + vector duplicate_variables; + for(size_t i_Output =0; i_Output < output_names.size(); i_Output++){ + n_occurances = 0; + for(size_t i_map=0; i_mapGetNANNs(); i_map++){ + size_t i_ANN = input_output_map->GetANNIndex(i_map); + vector> output_map = input_output_map->GetOutputMapping(i_map); + for(size_t j_Output=0; j_Output 1){ + duplicate_variables.push_back(output_names[i_Output]); + duplicate = true; + } + } + if(duplicate){ + string message{"Variables "}; + for(size_t iVar=0; iVar &input_names, IOMap *input_output_map) const { +unsigned short n_inputs = input_names.size(); +vector missing_inputs; +bool inputs_are_present{true}; +for(size_t iInput=0; iInputGetNANNs(); i_map++){ + size_t i_ANN = input_output_map->GetANNIndex(i_map); + vector> input_map = input_output_map->GetInputMapping(i_map); + for(size_t jInput=0; jInput 0){ + string message{"Inputs "}; + for(size_t iVar=0; iVar &output_names, IOMap * input_output_map) const { + /* Check wether all output variables are in the loaded MLPs */ + + vector missing_outputs; + bool outputs_are_present{true}; + /* Looping over the target outputs */ + for(size_t iOutput=0; iOutputGetNANNs(); i_map++){ + size_t i_ANN = input_output_map->GetANNIndex(i_map); + vector> output_map = input_output_map->GetOutputMapping(i_map); + + /* Looping over the outputs of 
the output map of the current ANN */ + for(size_t jOutput=0; jOutput 0){ + string message{"Outputs "}; + for(size_t iVar=0; iVar. + */ +#include "../../../include/numerics/multilayer_perceptron/NeuralNetwork.hpp" +#include "../../../include/numerics/multilayer_perceptron/Layer.hpp" +#include +#include "../../../include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp" +#include "../../../Common/include/toolboxes/geometry_toolbox.hpp" +using namespace std; + + +void NeuralNetwork::predict(vector &inputs, vector&outputs, su2double**doutputs_dinputs){ + su2double x, x_norm, y, y_norm, tanx; + size_t iNeuron, jNeuron, iLayer, nNeurons_current, nNeurons_previous; + bool same_point{true}; + for(iNeuron=0; iNeurongetNNeurons(); iNeuron++){ + x_norm = (inputs[iNeuron] - input_norm[iNeuron].first)/(input_norm[iNeuron].second - input_norm[iNeuron].first); + if(abs(x_norm - inputLayer->getValue(iNeuron)) > 0) same_point = false; + inputLayer->setValue(iNeuron, x_norm); + } + if(!same_point){ + for(iLayer=1; iLayergetNNeurons(); + nNeurons_previous = total_layers[iLayer - 1]->getNNeurons(); + + + switch (activation_function_types[iLayer]) + { + case ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE: + for(iNeuron=0; iNeuron 0 ? x : tanh(x); + total_layers[iLayer]->setValue(iNeuron, y); + } + break; + case ENUM_ACTIVATION_FUNCTION::ELU: + for(iNeuron=0; iNeuron 0 ? x : (exp(x) - 1); + total_layers[iLayer]->setValue(iNeuron, y); + } + break; + case ENUM_ACTIVATION_FUNCTION::LINEAR: + for(iNeuron=0; iNeuronsetValue(iNeuron, y); + } + break; + case ENUM_ACTIVATION_FUNCTION::RELU: + for(iNeuron=0; iNeuron 0.0 ? 
x : 0.0; + total_layers[iLayer]->setValue(iNeuron, y); + } + break; + case ENUM_ACTIVATION_FUNCTION::NONE: + for(iNeuron=0; iNeuronsetValue(iNeuron, y); + } + break; + default: + break; + } + + } + } + for(iNeuron=0; iNeurongetNNeurons(); iNeuron++){ + y_norm = outputLayer->getValue(iNeuron); + y = y_norm*(output_norm[iNeuron].second - output_norm[iNeuron].first) + output_norm[iNeuron].first; + + // Setting output value + *outputs[iNeuron] = y; + } +} +// void NeuralNetwork::predict(vector &inputs, vector&outputs, su2double**doutputs_dinputs){ +// // Evaluate the MLP based on the inputs. In case a pointer to the output gradient is provided, the partial +// // derivatives of the outputs with respect to the inputs are evaluated as well. + +// su2double *y_values_old, // Perceptron values of the previous layer +// *y_values_new, // Perceptron values of the current layer +// **dy_dx_old, // Derivatives of the previous layer +// **dy_dx_new; // Derivatives of the current layer + +// su2double x, // Perceptron input +// x_norm, // Normalized MLP input +// y, // Perceptron output +// y_norm; // Normalized MLP output + +// size_t n_inputs{inputLayer->getNNeurons()}; // Number of inputs + +// bool compute_gradient = (doutputs_dinputs != nullptr); // Gradient computation option + +// // Stepping though the MLP layers, communicating the output from the previous layer to the next. +// // TODO: make this recursive to increase performance and code simplification +// for(size_t iLayer=0; iLayer < total_layers.size(); iLayer++){ + +// // In case the input layer is called, input normalization is applied. +// if(total_layers.at(iLayer)->isInput()){ +// // Generating an array for the input layer preceptron values. 
+// y_values_old = new su2double[total_layers.at(iLayer)->getNNeurons()]; + +// if(compute_gradient) +// dy_dx_old = new su2double*[total_layers.at(iLayer)->getNNeurons()]; + +// for(size_t iNeuron=0; iNeurongetNNeurons(); iNeuron++){ +// // Normalizing the inputs +// x_norm = (inputs[iNeuron] - input_norm.at(iNeuron).first)/(input_norm.at(iNeuron).second - input_norm.at(iNeuron).first); + +// // Set perceptron values of input layer. +// total_layers.at(iLayer)->setValue(iNeuron, x_norm); +// y_values_old[iNeuron] = x_norm; + +// if(compute_gradient){ +// dy_dx_old[iNeuron] = new su2double[n_inputs]; +// for(size_t jInput=0; jInputgetNNeurons()]; + +// if(compute_gradient) +// dy_dx_new = new su2double*[total_layers.at(iLayer)->getNNeurons()]; + +// for(size_t iNeuron=0; iNeurongetNNeurons(); iNeuron++){ +// // Multiplying the weights and output values of the previous layer to generate the current perceptron input +// x = GeometryToolbox::DotProduct(total_layers.at(iLayer-1)->getNNeurons(), y_values_old, weights_mat[iLayer-1][iNeuron]); + +// // Generating the perceptron output +// y = total_layers.at(iLayer)->activation_function(iNeuron, x); + +// // Storing perceptron output +// total_layers.at(iLayer)->setValue(iNeuron, y); +// y_values_new[iNeuron] = y; + +// if(compute_gradient){ +// dy_dx_new[iNeuron] = new su2double[n_inputs]; +// for(size_t jInput=0; jInput < n_inputs; jInput++) dy_dx_new[iNeuron][jInput] = 0.0; + +// for(size_t jNeuron=0; jNeurongetNNeurons(); jNeuron++){ +// for(size_t jInput=0; jInputgetNNeurons(); jInput++){ +// dy_dx_new[iNeuron][jInput] += total_layers.at(iLayer)->getdYdX(iNeuron) * weights_mat[iLayer-1][iNeuron][jNeuron] * dy_dx_old[jNeuron][jInput]; +// } +// } +// } + +// } +// // Resetting pointer to previous layer outputs +// delete y_values_old; +// y_values_old = y_values_new; + +// if(compute_gradient){ +// for(size_t jNeuron=0; jNeurongetNNeurons(); jNeuron++){ +// delete [] dy_dx_old[jNeuron]; +// } +// delete [] dy_dx_old; 
+// dy_dx_old = dy_dx_new; +// } +// } +// } +// delete y_values_old; + +// // Dimensionalize the MLP outputs and gradients +// for(size_t iOutput=0; iOutputgetNNeurons(); iOutput++){ + +// // Dimensionalize the outputs +// y_norm = outputLayer->getValue(iOutput); +// y = y_norm*(output_norm.at(iOutput).second - output_norm.at(iOutput).first) + output_norm.at(iOutput).first; + +// // Setting output value +// *outputs.at(iOutput) = y; + +// if(compute_gradient){ +// for(size_t iInput=0; iInputsetInput(true); + input_norm.resize(n_neurons); +} + +void NeuralNetwork::defineOutputLayer(unsigned long n_neurons){ + //cout << "Creating an output layer with " << n_neurons << " neurons. "<< endl; + outputLayer = new Layer(n_neurons); + output_norm.resize(n_neurons); +} + +void NeuralNetwork::push_hidden_layer(unsigned long n_neurons){ + Layer *newLayer = new Layer(n_neurons); + //cout << "Creating a hidden layer with " << n_neurons << " neurons. "<< endl; + hiddenLayers.push_back(newLayer); + n_hidden_layers ++; +} + +void NeuralNetwork::sizeWeights(){ + unsigned long i_layer{0}; + weights.resize(n_hidden_layers + 1); + weights.at(i_layer).resize(inputLayer->getNNeurons()); + Layer * previouslayer = inputLayer; + + if(n_hidden_layers != 0){ + for(size_t i_hidden_layer=0; i_hidden_layer < n_hidden_layers; i_hidden_layer++){ + for(size_t i_neuron=0; i_neuron < previouslayer->getNNeurons(); i_neuron++){ + weights.at(i_layer).at(i_neuron).resize(hiddenLayers.at(i_hidden_layer)->getNNeurons()); + } + previouslayer = hiddenLayers.at(i_hidden_layer); + i_layer ++; + weights.at(i_layer).resize(previouslayer->getNNeurons()); + } + } + for(size_t i_neuron=0; i_neuron < previouslayer->getNNeurons(); i_neuron++){ + weights.at(i_layer).at(i_neuron).resize(outputLayer->getNNeurons()); + } + + total_layers.resize(n_hidden_layers + 2); + total_layers.at(0) = inputLayer; + for(size_t iLayer=0; iLayergetNNeurons(), inputLayer->getNNeurons()); + for(size_t iLayer=1; iLayergetNNeurons(), 
hiddenLayers[iLayer-1]->getNNeurons()); + } + weights_mat[n_hidden_layers].resize(outputLayer->getNNeurons(), hiddenLayers[n_hidden_layers-1]->getNNeurons()); + +} + +void NeuralNetwork::setWeight(unsigned long i_layer, unsigned long i_neuron, unsigned long j_neuron, su2double value){ + //weights.at(i_layer).at(i_neuron).at(j_neuron) = value; + weights_mat[i_layer][j_neuron][i_neuron] = value; +} +void NeuralNetwork::displayNetwork(){ + cout << "Input layer: " << inputLayer->getNNeurons() << " neurons, Activation Function: " << inputLayer->getActivationType() << endl; + for(size_t i=0; igetNNeurons(); i++){ + for(size_t j=0; jgetNNeurons(); j++){ + cout << weights_mat[0][i][j] << " "; + } + su2double thingy[] = {1, 1, 1}; + cout << GeometryToolbox::DotProduct(inputLayer->getNNeurons(), weights_mat[0][i], thingy) << endl; + cout << endl; + } + for(size_t i_layer=0; i_layer < hiddenLayers.size(); i_layer++){ + cout << "Hidden layer " << i_layer + 1 << ": " << hiddenLayers.at(i_layer)->getNNeurons() << " neurons, Activation Function: " << hiddenLayers.at(i_layer) ->getActivationType() << endl; + for(size_t i=0; igetNNeurons() <<" neurons, Activation Function: " << outputLayer->getActivationType() << endl; +} + +void NeuralNetwork::setActivationFunction(unsigned long i_layer, string input) +{ + if(input.compare("linear") == 0){ + //activation_functions[i_layer] = new Linear(); + activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::LINEAR; + return; + } + if(input.compare("relu") == 0){ + //activation_functions[i_layer] = new Relu(); + activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::RELU; + return; + } + if((input.compare("smooth_slope") == 0)){ + //activation_functions[i_layer] = new SmoothSlope(); + activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE; + return; + } + if((input.compare("elu") == 0)){ + //activation_functions[i_layer] = new SmoothSlope(); + activation_function_types[i_layer] = 
ENUM_ACTIVATION_FUNCTION::ELU; + return; + } + if(input.compare("none") == 0){ + //activation_functions[i_layer] = new None(); + activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::NONE; + return; + } + //total_layers.at(i_layer)->setActivationFunction(input); + return; +} diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/Neuron.cpp b/SU2_CFD/src/numerics/multilayer_perceptron/Neuron.cpp new file mode 100644 index 000000000000..4fbef6a462e6 --- /dev/null +++ b/SU2_CFD/src/numerics/multilayer_perceptron/Neuron.cpp @@ -0,0 +1,114 @@ +/*! + * \file Neuron.cpp + * \brief Implementation of the neuron class to be used within the + * Layer class as a part of the NeuralNetwork class. + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . 
+ */ +#include "../../../include/numerics/multilayer_perceptron/Neuron.hpp" +#include +#include +#include + +using namespace std; +// Neuron::Neuron() +// { +// } + +// su2double Neuron::activation_function(su2double x) +// { +// x += bias; +// su2double tanx; +// switch(ActivationFunction){ +// case ENUM_ACTIVATION_FUNCTION::LINEAR: +// dy_dx = 1; +// return x; +// break; +// case ENUM_ACTIVATION_FUNCTION::RELU: +// if(x > 0.0){ +// dy_dx = 1.0; +// return x; +// }else{ +// dy_dx = 0.0; +// return 0.0; +// } +// break; +// case ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE: +// tanx = tanh(x); +// if(tanx > x){ +// dy_dx = 4.0 / pow((exp(-x) + exp(x)), 2); +// return tanx; +// }else{ +// dy_dx = 1.0; +// return x; +// } +// break; +// default: +// SU2_MPI::Error(string("Unknown activation function type: ")+activation_type, +// CURRENT_FUNCTION); +// return 0.0; +// break; +// } +// // if(activation_type.compare("smooth_slope") == 0){ +// // su2double tanx = tanh(x); +// // if(tanx > x){ +// // dy_dx = 4.0 / pow((exp(-x) + exp(x)), 2); +// // return tanx; +// // }else{ +// // dy_dx = 1.0; +// // return x; +// // } + +// // } +// // if(activation_type.compare("linear") == 0){ +// // dy_dx = 1.0; +// // return x; +// // } +// // if(activation_type.compare("relu") == 0){ +// // su2double zero{0.0}; +// // if(x > zero){ +// // dy_dx = 1.0; +// // return x; +// // }else{ +// // dy_dx = 0.0; +// // return zero; +// // } +// // } + +// // SU2_MPI::Error(string("Unknown activation function type: ")+activation_type, +// // CURRENT_FUNCTION); + +// // return x; +// } + +// void Neuron::setFunctionType(string input){ +// activation_type = input; + +// if(input.compare("smooth_slope") == 0) ActivationFunction = ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE; return; + +// if(input.compare("linear") == 0) ActivationFunction = ENUM_ACTIVATION_FUNCTION::LINEAR; return; + +// if(input.compare("relu") == 0) ActivationFunction = ENUM_ACTIVATION_FUNCTION::RELU; return; + +// 
if(input.compare("none") == 0) ActivationFunction = ENUM_ACTIVATION_FUNCTION::NONE; return; +// } \ No newline at end of file diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp b/SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp new file mode 100644 index 000000000000..5c137397bb83 --- /dev/null +++ b/SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp @@ -0,0 +1,151 @@ +/*! + * \file ReadNeuralNetwork.cpp + * \brief Implementation of the reader class to read .mlp input files + * used to set up multi-layer perceptrons. + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . 
+ */ +#include "../../../include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp" +using namespace std; + +ReadNeuralNetwork::ReadNeuralNetwork(string filename_in){ + filename = filename_in; +}; + +void ReadNeuralNetwork::ReadMLPFile(){ + ifstream file_stream; + file_stream.open(filename.c_str(), ifstream::in); + if (!file_stream.is_open()) { + SU2_MPI::Error(string("There is no MLP file file called ") + filename, + CURRENT_FUNCTION); + } + + string line, word; + double input_min, input_max, output_min, output_max; + + line = SkipToFlag(&file_stream, "[number of layers]"); + getline(file_stream, line); + n_layers = stoul(line); + n_neurons.resize(n_layers); + biases.resize(n_layers); + weights.resize(n_layers-1); + activation_functions.resize(n_layers); + line = SkipToFlag(&file_stream, "[neurons per layer]"); + for(size_t iLayer=0; iLayer> word; + activation_functions.at(iLayer) = word; + } + + line = SkipToFlag(&file_stream, "[input names]"); + for(size_t iInput=0; iInput < n_neurons.at(0); iInput++){ + getline(file_stream, line); + input_names.push_back(line); + } + + line = SkipToFlag(&file_stream, "[input normalization]"); + for(size_t iInput=0; iInput> word; + input_min = stold(word); + input_norm_stream >> word; + input_max = stold(word); + input_norm.at(iInput) = make_pair(input_min, input_max); + } + + line = SkipToFlag(&file_stream, "[output names]"); + for(size_t iOutput=0; iOutput < n_neurons.at(n_neurons.size()-1); iOutput++){ + getline(file_stream, line); + output_names.push_back(line); + } + + line = SkipToFlag(&file_stream, "[output normalization]"); + for(size_t iOutput=0; iOutput> word; + output_min = stold(word); + output_norm_stream >> word; + output_max = stold(word); + output_norm.at(iOutput) = make_pair(output_min, output_max); + } + + line = SkipToFlag(&file_stream, "[weights per layer]"); + for(size_t iLayer=0; iLayer> word; + weights.at(iLayer).at(iNeuron).at(jNeuron) = stold(word); + } + } + getline(file_stream, line); + } + + line = 
SkipToFlag(&file_stream, "[biases per layer]"); + for(size_t iLayer=0; iLayer> word; + biases.at(iLayer).at(iNeuron) = stold(word); + } + } + + + +} + +string ReadNeuralNetwork::SkipToFlag(ifstream *file_stream, string flag) { + string line; + getline(*file_stream, line); + + while (line.compare(flag) != 0 && !(*file_stream).eof()) { + getline(*file_stream, line); + } + + if ((*file_stream).eof()) + cout << "line not in file!" << endl; + + return line; +} \ No newline at end of file From f5d0267d47a8a235c4e3e026ca6faba0806507e2 Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Tue, 13 Sep 2022 11:58:30 +0200 Subject: [PATCH 02/84] Added error checking in MLP file reader and fixed bug with input-output mapping preventing single, specific outputs from MLP's to be evaluated --- .../numerics/multilayer_perceptron/IOMap.hpp | 3 + .../multilayer_perceptron/NeuralNetwork.hpp | 4 +- .../multilayer_perceptron/LookUp_ANN.cpp | 6 +- .../multilayer_perceptron/NeuralNetwork.cpp | 119 +-------- .../ReadNeuralNetwork.cpp | 234 +++++++++++++----- 5 files changed, 192 insertions(+), 174 deletions(-) diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp b/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp index 66878762edbe..2e2f14b1d757 100644 --- a/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp +++ b/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp @@ -56,6 +56,9 @@ class IOMap size_t GetANNIndex(size_t i_Map){return ANN_indices[i_Map];} size_t GetInputIndex(size_t i_Map, size_t iInput){return Input_Map[i_Map][iInput].first;} size_t GetOutputIndex(size_t i_Map, size_t iOutput){return Output_Map[i_Map][iOutput].first;} + size_t GetANNOutputIndex(size_t i_Map, size_t iOutput){return Output_Map[i_Map][iOutput].second;} + size_t GetNMappedOutputs(size_t i_Map){return Output_Map[i_Map].size();} + vector> GetOutputMapping(size_t i_map){return Output_Map[i_map];} vector> GetInputMapping(size_t i_map){return Input_Map[i_map];} diff --git 
a/SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp b/SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp index 682d673f5a6a..a5cb8beeffa3 100644 --- a/SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp +++ b/SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp @@ -98,6 +98,7 @@ class NeuralNetwork vector> input_norm; vector> output_norm; vector last_inputs; + su2double* ANN_outputs; enum ENUM_ACTIVATION_FUNCTION { NONE=0, @@ -135,7 +136,7 @@ class NeuralNetwork // su2double predict(su2double * X, size_t i_output); // void predict(vector &inputs, vector &outputs); - void predict(vector &inputs, vector &outputs, su2double**doutputs_dinputs=nullptr); + void predict(vector &inputs); //void predict_new(vector &inputs, vector &outputs, su2double**doutputs_dinputs=nullptr); @@ -151,6 +152,7 @@ class NeuralNetwork size_t GetnInputs(){return input_names.size();} size_t GetnOutputs(){return output_names.size();} + su2double GetANN_Output(size_t iOutput){return ANN_outputs[iOutput];} void SizeActivationFunctions(unsigned long n_layers){activation_function_types = new ENUM_ACTIVATION_FUNCTION[n_layers]; activation_functions.resize(4); activation_functions[ENUM_ACTIVATION_FUNCTION::LINEAR] = new Linear(); activation_functions[ENUM_ACTIVATION_FUNCTION::RELU] = new Relu(); diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/LookUp_ANN.cpp b/SU2_CFD/src/numerics/multilayer_perceptron/LookUp_ANN.cpp index 26c4c02e6451..3999cf47ba73 100644 --- a/SU2_CFD/src/numerics/multilayer_perceptron/LookUp_ANN.cpp +++ b/SU2_CFD/src/numerics/multilayer_perceptron/LookUp_ANN.cpp @@ -76,8 +76,10 @@ void LookUp_ANN::Predict_ANN(IOMap *input_output_map, vector& inputs, for(size_t i_map=0; i_mapGetNANNs(); i_map++){ size_t i_ANN = input_output_map->GetANNIndex(i_map); vector ANN_inputs = input_output_map->GetANN_Inputs(i_map, inputs); - vector ANN_outputs = input_output_map->GetANN_Outputs(i_map, outputs); - 
NeuralNetworks[i_ANN]->predict(ANN_inputs, ANN_outputs); + NeuralNetworks[i_ANN]->predict(ANN_inputs); + for(size_t i=0; i < input_output_map->GetNMappedOutputs(i_map); i++){ + *outputs[input_output_map->GetOutputIndex(i_map, i)] = NeuralNetworks[i_ANN]->GetANN_Output(input_output_map->GetANNOutputIndex(i_map, i)); + } } } void LookUp_ANN::GenerateANN(NeuralNetwork * ANN, string fileName) diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/NeuralNetwork.cpp b/SU2_CFD/src/numerics/multilayer_perceptron/NeuralNetwork.cpp index 2cfa5a165d6c..2ece6768c590 100644 --- a/SU2_CFD/src/numerics/multilayer_perceptron/NeuralNetwork.cpp +++ b/SU2_CFD/src/numerics/multilayer_perceptron/NeuralNetwork.cpp @@ -33,7 +33,7 @@ using namespace std; -void NeuralNetwork::predict(vector &inputs, vector&outputs, su2double**doutputs_dinputs){ +void NeuralNetwork::predict(vector &inputs){ su2double x, x_norm, y, y_norm, tanx; size_t iNeuron, jNeuron, iLayer, nNeurons_current, nNeurons_previous; bool same_point{true}; @@ -95,121 +95,9 @@ void NeuralNetwork::predict(vector &inputs, vector&output y = y_norm*(output_norm[iNeuron].second - output_norm[iNeuron].first) + output_norm[iNeuron].first; // Setting output value - *outputs[iNeuron] = y; + ANN_outputs[iNeuron] = y; } } -// void NeuralNetwork::predict(vector &inputs, vector&outputs, su2double**doutputs_dinputs){ -// // Evaluate the MLP based on the inputs. In case a pointer to the output gradient is provided, the partial -// // derivatives of the outputs with respect to the inputs are evaluated as well. 
- -// su2double *y_values_old, // Perceptron values of the previous layer -// *y_values_new, // Perceptron values of the current layer -// **dy_dx_old, // Derivatives of the previous layer -// **dy_dx_new; // Derivatives of the current layer - -// su2double x, // Perceptron input -// x_norm, // Normalized MLP input -// y, // Perceptron output -// y_norm; // Normalized MLP output - -// size_t n_inputs{inputLayer->getNNeurons()}; // Number of inputs - -// bool compute_gradient = (doutputs_dinputs != nullptr); // Gradient computation option - -// // Stepping though the MLP layers, communicating the output from the previous layer to the next. -// // TODO: make this recursive to increase performance and code simplification -// for(size_t iLayer=0; iLayer < total_layers.size(); iLayer++){ - -// // In case the input layer is called, input normalization is applied. -// if(total_layers.at(iLayer)->isInput()){ -// // Generating an array for the input layer preceptron values. -// y_values_old = new su2double[total_layers.at(iLayer)->getNNeurons()]; - -// if(compute_gradient) -// dy_dx_old = new su2double*[total_layers.at(iLayer)->getNNeurons()]; - -// for(size_t iNeuron=0; iNeurongetNNeurons(); iNeuron++){ -// // Normalizing the inputs -// x_norm = (inputs[iNeuron] - input_norm.at(iNeuron).first)/(input_norm.at(iNeuron).second - input_norm.at(iNeuron).first); - -// // Set perceptron values of input layer. 
-// total_layers.at(iLayer)->setValue(iNeuron, x_norm); -// y_values_old[iNeuron] = x_norm; - -// if(compute_gradient){ -// dy_dx_old[iNeuron] = new su2double[n_inputs]; -// for(size_t jInput=0; jInputgetNNeurons()]; - -// if(compute_gradient) -// dy_dx_new = new su2double*[total_layers.at(iLayer)->getNNeurons()]; - -// for(size_t iNeuron=0; iNeurongetNNeurons(); iNeuron++){ -// // Multiplying the weights and output values of the previous layer to generate the current perceptron input -// x = GeometryToolbox::DotProduct(total_layers.at(iLayer-1)->getNNeurons(), y_values_old, weights_mat[iLayer-1][iNeuron]); - -// // Generating the perceptron output -// y = total_layers.at(iLayer)->activation_function(iNeuron, x); - -// // Storing perceptron output -// total_layers.at(iLayer)->setValue(iNeuron, y); -// y_values_new[iNeuron] = y; - -// if(compute_gradient){ -// dy_dx_new[iNeuron] = new su2double[n_inputs]; -// for(size_t jInput=0; jInput < n_inputs; jInput++) dy_dx_new[iNeuron][jInput] = 0.0; - -// for(size_t jNeuron=0; jNeurongetNNeurons(); jNeuron++){ -// for(size_t jInput=0; jInputgetNNeurons(); jInput++){ -// dy_dx_new[iNeuron][jInput] += total_layers.at(iLayer)->getdYdX(iNeuron) * weights_mat[iLayer-1][iNeuron][jNeuron] * dy_dx_old[jNeuron][jInput]; -// } -// } -// } - -// } -// // Resetting pointer to previous layer outputs -// delete y_values_old; -// y_values_old = y_values_new; - -// if(compute_gradient){ -// for(size_t jNeuron=0; jNeurongetNNeurons(); jNeuron++){ -// delete [] dy_dx_old[jNeuron]; -// } -// delete [] dy_dx_old; -// dy_dx_old = dy_dx_new; -// } -// } -// } -// delete y_values_old; - -// // Dimensionalize the MLP outputs and gradients -// for(size_t iOutput=0; iOutputgetNNeurons(); iOutput++){ - -// // Dimensionalize the outputs -// y_norm = outputLayer->getValue(iOutput); -// y = y_norm*(output_norm.at(iOutput).second - output_norm.at(iOutput).first) + output_norm.at(iOutput).first; - -// // Setting output value -// *outputs.at(iOutput) = y; 
- -// if(compute_gradient){ -// for(size_t iInput=0; iInputgetNNeurons(), hiddenLayers[n_hidden_layers-1]->getNNeurons()); -} + ANN_outputs = new su2double[outputLayer->getNNeurons()]; +} void NeuralNetwork::setWeight(unsigned long i_layer, unsigned long i_neuron, unsigned long j_neuron, su2double value){ //weights.at(i_layer).at(i_neuron).at(j_neuron) = value; diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp b/SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp index 5c137397bb83..13d0b64a14fe 100644 --- a/SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp +++ b/SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp @@ -42,71 +42,193 @@ void ReadNeuralNetwork::ReadMLPFile(){ string line, word; double input_min, input_max, output_min, output_max; + bool eoHeader{false}, found_layercount{false}, found_input_names{false}, found_output_names{false}; - line = SkipToFlag(&file_stream, "[number of layers]"); - getline(file_stream, line); - n_layers = stoul(line); - n_neurons.resize(n_layers); - biases.resize(n_layers); - weights.resize(n_layers-1); - activation_functions.resize(n_layers); - line = SkipToFlag(&file_stream, "[neurons per layer]"); - for(size_t iLayer=0; iLayer"); + + while(getline(file_stream, line) && !eoHeader){ + if(line.compare("[number of layers]") == 0){ + getline(file_stream, line); + n_layers = stoul(line); + n_neurons.resize(n_layers); + biases.resize(n_layers); + weights.resize(n_layers-1); + activation_functions.resize(n_layers); + + found_layercount = true; } - } - input_norm.resize(n_neurons.at(0)); - output_norm.resize(n_neurons.at(n_neurons.size()-1)); + if(line.compare("[neurons per layer]") == 0){ + if(!found_layercount){ + SU2_MPI::Error("No layer count provided before defining neuron count per layer", CURRENT_FUNCTION); + } + // Loop over layer count and size neuron count and bias count per layer accordingly + for(size_t iLayer=0; iLayer> word; - 
activation_functions.at(iLayer) = word; - } + // Size output normalization according to number of outputs + output_norm.resize(n_neurons.at(n_neurons.size()-1)); + // Set default output normalization + for(size_t iNeuron=0; iNeuron> word; + activation_functions.at(iLayer) = word; + } + } - line = SkipToFlag(&file_stream, "[input normalization]"); - for(size_t iInput=0; iInput> word; - input_min = stold(word); - input_norm_stream >> word; - input_max = stold(word); - input_norm.at(iInput) = make_pair(input_min, input_max); - } + if(line.compare("[input names]") == 0){ + found_input_names = true; + unsigned short j{1}; + getline(file_stream, line); + while(line.compare("") != 0){ + input_names.push_back(line); + getline(file_stream, line); + } - line = SkipToFlag(&file_stream, "[output names]"); - for(size_t iOutput=0; iOutput < n_neurons.at(n_neurons.size()-1); iOutput++){ - getline(file_stream, line); - output_names.push_back(line); - } + if(input_names.size() != n_neurons.at(0)){ + SU2_MPI::Error("Number of input variable names inconsistent with number of MLP inputs", CURRENT_FUNCTION); + } + } - line = SkipToFlag(&file_stream, "[output normalization]"); - for(size_t iOutput=0; iOutput> word; - output_min = stold(word); - output_norm_stream >> word; - output_max = stold(word); - output_norm.at(iOutput) = make_pair(output_min, output_max); + if(line.compare("[input normalization]") == 0){ + for(size_t iInput=0; iInput> word; + input_min = stold(word); + input_norm_stream >> word; + input_max = stold(word); + input_norm.at(iInput) = make_pair(input_min, input_max); + } + } + } + + if(line.compare("[output names]") == 0){ + found_output_names = true; + getline(file_stream, line); + while(line.compare("") != 0){ + output_names.push_back(line); + getline(file_stream, line); + } + + if(output_names.size() != (n_neurons.at(n_neurons.size()-1))){ + SU2_MPI::Error("Number of output variable names inconsistent with number of MLP outputs", CURRENT_FUNCTION); + } + } + + 
if(line.compare("[output normalization]") == 0){ + for(size_t iOutput=0; iOutput> word; + output_min = stold(word); + output_norm_stream >> word; + output_max = stold(word); + output_norm.at(iOutput) = make_pair(output_min, output_max); + } + } + } + + if(line.compare("") == 0){ + eoHeader = true; + } + } + if(!found_input_names){ + SU2_MPI::Error("No MLP input variable names provided", CURRENT_FUNCTION); } + if(!found_output_names){ + SU2_MPI::Error("No MLP output variable names provided", CURRENT_FUNCTION); + } + + + // line = SkipToFlag(&file_stream, "[number of layers]"); + // getline(file_stream, line); + // n_layers = stoul(line); + // n_neurons.resize(n_layers); + // biases.resize(n_layers); + // weights.resize(n_layers-1); + // activation_functions.resize(n_layers); + // line = SkipToFlag(&file_stream, "[neurons per layer]"); + // for(size_t iLayer=0; iLayer> word; + // activation_functions.at(iLayer) = word; + // } + + // line = SkipToFlag(&file_stream, "[input names]"); + // for(size_t iInput=0; iInput < n_neurons.at(0); iInput++){ + // getline(file_stream, line); + // input_names.push_back(line); + // } + + // line = SkipToFlag(&file_stream, "[input normalization]"); + // for(size_t iInput=0; iInput> word; + // input_min = stold(word); + // input_norm_stream >> word; + // input_max = stold(word); + // input_norm.at(iInput) = make_pair(input_min, input_max); + // } + + // line = SkipToFlag(&file_stream, "[output names]"); + // for(size_t iOutput=0; iOutput < n_neurons.at(n_neurons.size()-1); iOutput++){ + // getline(file_stream, line); + // output_names.push_back(line); + // } + + // line = SkipToFlag(&file_stream, "[output normalization]"); + // for(size_t iOutput=0; iOutput> word; + // output_min = stold(word); + // output_norm_stream >> word; + // output_max = stold(word); + // output_norm.at(iOutput) = make_pair(output_min, output_max); + // } line = SkipToFlag(&file_stream, "[weights per layer]"); for(size_t iLayer=0; iLayer Date: Thu, 15 Sep 2022 
14:07:14 +0200 Subject: [PATCH 03/84] Moved MLP classes to separate library in its own namespace. Added a template CFluidModel class using MLPs for lookups as well --- .../multilayer_perceptron/CIOMap.hpp | 87 +++++++++++ .../multilayer_perceptron/CLayer.hpp | 135 ++++++++++++++++++ .../multilayer_perceptron/CLookUp_ANN.hpp | 36 ++--- .../multilayer_perceptron/CNeuralNetwork.hpp | 94 ++++-------- .../multilayer_perceptron/CNeuron.hpp | 113 +++++++++++++++ .../CReadNeuralNetwork.hpp | 16 ++- Common/lib/Makefile.am | 6 + Common/src/toolboxes/meson.build | 8 ++ .../multilayer_perceptron/CIOMap.cpp | 13 +- .../multilayer_perceptron/CLayer.cpp | 26 +--- .../multilayer_perceptron/CLookUp_ANN.cpp | 34 +++-- .../multilayer_perceptron/CNeuralNetwork.cpp | 67 +++++---- .../multilayer_perceptron/CNeuron.cpp | 33 +++++ .../CReadNeuralNetwork.cpp | 90 +++--------- SU2_CFD/include/fluid/CMLPGas_Template.hpp | 102 +++++++++++++ .../numerics/multilayer_perceptron/IOMap.hpp | 83 ----------- .../numerics/multilayer_perceptron/Layer.hpp | 63 -------- .../numerics/multilayer_perceptron/Neuron.hpp | 74 ---------- SU2_CFD/obj/Makefile.am | 7 +- SU2_CFD/src/fluid/CMLPGas_Template.cpp | 125 ++++++++++++++++ SU2_CFD/src/meson.build | 11 +- .../numerics/multilayer_perceptron/Neuron.cpp | 114 --------------- 22 files changed, 748 insertions(+), 589 deletions(-) create mode 100644 Common/include/toolboxes/multilayer_perceptron/CIOMap.hpp create mode 100644 Common/include/toolboxes/multilayer_perceptron/CLayer.hpp rename SU2_CFD/include/numerics/multilayer_perceptron/LookUp_ANN.hpp => Common/include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp (79%) rename SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp => Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp (59%) create mode 100644 Common/include/toolboxes/multilayer_perceptron/CNeuron.hpp rename SU2_CFD/include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp => 
Common/include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp (90%) rename SU2_CFD/src/numerics/multilayer_perceptron/IOMap.cpp => Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp (82%) rename SU2_CFD/src/numerics/multilayer_perceptron/Layer.cpp => Common/src/toolboxes/multilayer_perceptron/CLayer.cpp (67%) rename SU2_CFD/src/numerics/multilayer_perceptron/LookUp_ANN.cpp => Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp (87%) rename SU2_CFD/src/numerics/multilayer_perceptron/NeuralNetwork.cpp => Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp (77%) create mode 100644 Common/src/toolboxes/multilayer_perceptron/CNeuron.cpp rename SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp => Common/src/toolboxes/multilayer_perceptron/CReadNeuralNetwork.cpp (71%) create mode 100644 SU2_CFD/include/fluid/CMLPGas_Template.hpp delete mode 100644 SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp delete mode 100644 SU2_CFD/include/numerics/multilayer_perceptron/Layer.hpp delete mode 100644 SU2_CFD/include/numerics/multilayer_perceptron/Neuron.hpp create mode 100644 SU2_CFD/src/fluid/CMLPGas_Template.cpp delete mode 100644 SU2_CFD/src/numerics/multilayer_perceptron/Neuron.cpp diff --git a/Common/include/toolboxes/multilayer_perceptron/CIOMap.hpp b/Common/include/toolboxes/multilayer_perceptron/CIOMap.hpp new file mode 100644 index 000000000000..f0ef33fefed1 --- /dev/null +++ b/Common/include/toolboxes/multilayer_perceptron/CIOMap.hpp @@ -0,0 +1,87 @@ +/*! + * \file CIOMap.hpp + * \brief Declaration of input-to-output mapping class for artifical neural networks + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. 
AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . + */ +#pragma once + +#include +#include +#include +#include +#include +#include +#include "../../CConfig.hpp" +#include "../../linear_algebra/blas_structure.hpp" +#include "CLookUp_ANN.hpp" + +using namespace std; +namespace MLPToolbox +{ + + class CLookUp_ANN; + + class CIOMap + { + private: + vector ANN_indices; + vector>> Input_Map; + vector>> Output_Map; + vector> input_indices; + public: + CIOMap(){}; + CIOMap(CLookUp_ANN*ANN_collection, vector &inputs, vector &outputs); + ~CIOMap(){}; + void Push_ANN_index(size_t i_ANN){ANN_indices.push_back(i_ANN);}; + void PairVariableswithANNs(CLookUp_ANN * ANN_collection, vector &inputs, vector &outputs); + size_t GetNANNs(){return ANN_indices.size();} + size_t GetANNIndex(size_t i_Map){return ANN_indices[i_Map];} + size_t GetInputIndex(size_t i_Map, size_t iInput){return Input_Map[i_Map][iInput].first;} + size_t GetOutputIndex(size_t i_Map, size_t iOutput){return Output_Map[i_Map][iOutput].first;} + size_t GetANNOutputIndex(size_t i_Map, size_t iOutput){return Output_Map[i_Map][iOutput].second;} + size_t GetNMappedOutputs(size_t i_Map){return Output_Map[i_Map].size();} + + vector> GetOutputMapping(size_t i_map){return Output_Map[i_map];} + vector> GetInputMapping(size_t i_map){return Input_Map[i_map];} + + vector GetANN_Inputs(size_t i_Map, vector&inputs){ + vector ANN_input; + 
ANN_input.resize(Input_Map[i_Map].size()); + + for(size_t iInput=0; iInput GetANN_Outputs(size_t i_Map, vector&outputs){ + vector ANN_output; + ANN_output.resize(Output_Map[i_Map].size()); + + for(size_t iOutput=0; iOutput. + */ +#pragma once + +#include +#include +#include +#include + +#include "../../CConfig.hpp" +#include "CNeuron.hpp" +#include "../../linear_algebra/blas_structure.hpp" +using namespace std; + +namespace MLPToolbox{ +class CLayer +{ +private: + unsigned long number_of_neurons; /*!< Neuron count in current layer */ + CNeuron * neurons; /*!< Array of neurons in current layer */ + bool is_input; /*!< Input layer identifyer */ + string activation_type; /*!< Activation function type applied to the current layer*/ +public: + CLayer(); + CLayer(unsigned long n_neurons); + + /*! + * \brief Set current layer neuron count + * \param[in] n_neurons - Number of neurons in this layer + */ + void setNNeurons(unsigned long n_neurons); + + /*! + * \brief Get the current layer neuron count + * \return Neuron count + */ + unsigned long getNNeurons() const {return number_of_neurons;}; + + /*! + * \brief Define current layer as input layer + * \param[in] input - input layer identifyer + */ + void setInput(bool def){is_input = def;}; + + /*! + * \brief Get input layer identifyer + * \return input layer identifyer + */ + bool isInput() const {return is_input;}; + + /*! + * \brief Set the output value of a neuron in the layer + * \param[in] i_neuron - Neuron index + * \param[in] output_value - Activation function output + */ + void setOutput(size_t i_neuron, su2double value){neurons[i_neuron].setOutput(value);} + + /*! + * \brief Get the output value of a neuron in the layer + * \param[in] i_neuron - Neuron index + * \return Neuron output value + */ + su2double getOutput(size_t i_neuron) const {return neurons[i_neuron].getOutput();} + + /*! 
+ * \brief Set the input value of a neuron in the layer + * \param[in] i_neuron - Neuron index + * \param[in] input_value - Activation function input + */ + void setInput(size_t i_neuron, su2double value){neurons[i_neuron].setInput(value);} + + /*! + * \brief Get the input value of a neuron in the layer + * \param[in] i_neuron - Neuron index + * \return Neuron input value + */ + su2double getInput(size_t i_neuron) const {return neurons[i_neuron].getInput();} + + /*! + * \brief Set the bias value of a neuron in the layer + * \param[in] i_neuron - Neuron index + * \param[in] bias_value - Bias value + */ + void setBias(size_t i_neuron, su2double value){neurons[i_neuron].setBias(value);} + + /*! + * \brief Get the bias value of a neuron in the layer + * \param[in] i_neuron - Neuron index + * \return Neuron bias value + */ + su2double getBias(size_t i_neuron){return neurons[i_neuron].getBias();} + + /*! + * \brief Get the output-input gradient of a neuron in the layer + * \param[in] i_neuron - Neuron index + * \return Gradient of neuron output wrt input + */ + su2double getdYdX(size_t i_neuron){return neurons[i_neuron].getGradient();} + + /*! + * \brief Get the activation function name applied to this layer + * \return name of the activation function + */ + string getActivationType(){return activation_type;} + + ~CLayer(){ + delete [] neurons; + }; +}; + +} diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/LookUp_ANN.hpp b/Common/include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp similarity index 79% rename from SU2_CFD/include/numerics/multilayer_perceptron/LookUp_ANN.hpp rename to Common/include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp index 9cab1a9204c1..3b7a0baaf02e 100644 --- a/SU2_CFD/include/numerics/multilayer_perceptron/LookUp_ANN.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp @@ -1,5 +1,5 @@ /*! 
- * \file LookUp_ANN.hpp + * \file CLookUp_ANN.hpp * \brief Declaration of artificial neural network interpolation class * \author E. Bunschoten * \version 7.4.0 "Blackbird" @@ -33,19 +33,21 @@ #include #include #include -#include "../../../../Common/include/CConfig.hpp" -#include "../../../../Common/include/linear_algebra/blas_structure.hpp" -#include "NeuralNetwork.hpp" -#include "IOMap.hpp" +#include "../../CConfig.hpp" +#include "../../linear_algebra/blas_structure.hpp" +#include "CNeuralNetwork.hpp" +#include "CIOMap.hpp" using namespace std; -class IOMap; -class LookUp_ANN +namespace MLPToolbox{ +class CIOMap; + +class CLookUp_ANN { private: int rank{0}; - vector NeuralNetworks; /*!< Vector containing all loaded neural networks. */ + vector NeuralNetworks; /*!< Vector containing all loaded neural networks. */ vector ANN_filenames; /*!< Filenames containing ANN architecture information. */ @@ -64,7 +66,7 @@ class LookUp_ANN * \param[in] ANN - pointer to target NeuralNetwork class * \param[in] filename - filename containing ANN architecture information */ - void GenerateANN(NeuralNetwork*ANN, string filename); + void GenerateANN(CNeuralNetwork*ANN, string filename); /*! * \brief Read ANN architecture input file @@ -78,7 +80,7 @@ class LookUp_ANN * \brief ANN collection class constructor * \param[in] filename - filename containing list of ANN input files */ - LookUp_ANN(string fileName); + CLookUp_ANN(string fileName); /*! * \brief Evaluate loaded ANNs for given inputs and outputs @@ -86,9 +88,9 @@ class LookUp_ANN * \param[in] inputs - input values * \param[in] outputs - pointers to output variables */ - void Predict_ANN(IOMap *input_output_map, vector &inputs, vector &outputs); + void Predict_ANN(CIOMap *input_output_map, vector &inputs, vector &outputs); - ~LookUp_ANN(){for(size_t i_ANN=0; i_ANN &output_names, IOMap *input_output_map) const; + bool Check_Duplicate_Outputs(vector &output_names, CIOMap *input_output_map) const; /*! 
* \brief Check if all output variables are present in the loaded ANNs * \param[in] output_names - output variable names to check * \param[in] input_output_map - pointer to input-output map to be checked */ - bool Check_Use_of_Outputs(vector &output_names, IOMap *input_output_map) const; + bool Check_Use_of_Outputs(vector &output_names, CIOMap *input_output_map) const; /*! * \brief Check if all input variables are present in the loaded ANNs * \param[in] input_names - input variable names to check * \param[in] input_output_map - pointer to input-output map to be checked */ - bool Check_Use_of_Inputs(vector &input_names, IOMap *input_output_map) const; + bool Check_Use_of_Inputs(vector &input_names, CIOMap *input_output_map) const; /*! * \brief Map variable names to ANN inputs or outputs @@ -129,3 +131,5 @@ class LookUp_ANN }; +} + diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp similarity index 59% rename from SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp rename to Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp index a5cb8beeffa3..6e54cd363a86 100644 --- a/SU2_CFD/include/numerics/multilayer_perceptron/NeuralNetwork.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp @@ -1,5 +1,5 @@ /*! - * \file NeuralNetwork.hpp + * \file CNeuralNetwork.hpp * \brief Declaration of the neural network class * \author E. 
Bunschoten * \version 7.4.0 "Blackbird" @@ -33,71 +33,34 @@ #include #include -#include "../../../../Common/include/CConfig.hpp" -#include "../../../../Common/include/linear_algebra/blas_structure.hpp" -#include "Layer.hpp" +#include "../../CConfig.hpp" +#include "../../linear_algebra/blas_structure.hpp" +#include "CLayer.hpp" using namespace std; -class Activation_Function { - private: - public: - Activation_Function(){}; - ~Activation_Function(){}; - inline virtual su2double run_activation_function(su2double x) {return 0.0;} -}; - -class Linear : public Activation_Function { - private: - public: - Linear() : Activation_Function() {}; - ~Linear(){}; - su2double run_activation_function(su2double x) final {return x;} -}; - -class Relu : public Activation_Function { - private: - public: - Relu() : Activation_Function() {}; - ~Relu(){}; - su2double run_activation_function(su2double x) final {return max(0.0, x);} -}; - -class SmoothSlope : public Activation_Function { - private: - su2double tanhx; - public: - SmoothSlope() : Activation_Function() {}; - ~SmoothSlope(){}; - su2double run_activation_function(su2double x) final {tanhx = tanh(x); return max(tanhx, x);} -}; - -class None : public Activation_Function { - private: - public: - None() : Activation_Function() {}; - ~None(){}; - su2double run_activation_function(su2double x) final {return 0.0;} -}; - -class NeuralNetwork +namespace MLPToolbox{ +class CNeuralNetwork { private: - vector input_names; - vector output_names; + vector input_names, + output_names; unsigned long n_hidden_layers; - Layer *inputLayer; - Layer *outputLayer; - vector< Layer *> hiddenLayers; - vectortotal_layers; + + CLayer *inputLayer, + *outputLayer; + + vector hiddenLayers, + total_layers; vector>> weights; vector weights_mat; vector> values; - vector> input_norm; - vector> output_norm; + vector> input_norm, + output_norm; vector last_inputs; + su2double* ANN_outputs; enum ENUM_ACTIVATION_FUNCTION { @@ -108,15 +71,14 @@ class NeuralNetwork 
ELU = 4 }; ENUM_ACTIVATION_FUNCTION * activation_function_types; - vector activation_functions; - Activation_Function* current_activation_function; - //Activation_Function *activation_functions; + public: - NeuralNetwork(); - ~NeuralNetwork(){ + CNeuralNetwork(); + ~CNeuralNetwork(){ for(size_t i=0; igetNNeurons();} unsigned long getNNeurons(unsigned long iLayer, unsigned long iNeuron){return weights.at(iLayer).at(iNeuron).size();} - // su2double predict(su2double * X, size_t i_output); - // void predict(vector &inputs, vector &outputs); void predict(vector &inputs); - //void predict_new(vector &inputs, vector &outputs, su2double**doutputs_dinputs=nullptr); void SetInputNorm(unsigned long iInput, su2double input_min, su2double input_max){input_norm.at(iInput) = make_pair(input_min, input_max);} void SetOutputNorm(unsigned long iOutput, su2double output_min, su2double output_max){output_norm.at(iOutput) = make_pair(output_min, output_max);} @@ -153,10 +112,7 @@ class NeuralNetwork size_t GetnOutputs(){return output_names.size();} su2double GetANN_Output(size_t iOutput){return ANN_outputs[iOutput];} - void SizeActivationFunctions(unsigned long n_layers){activation_function_types = new ENUM_ACTIVATION_FUNCTION[n_layers]; activation_functions.resize(4); - activation_functions[ENUM_ACTIVATION_FUNCTION::LINEAR] = new Linear(); - activation_functions[ENUM_ACTIVATION_FUNCTION::RELU] = new Relu(); - activation_functions[ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE] = new SmoothSlope(); + void SizeActivationFunctions(unsigned long n_layers){activation_function_types = new ENUM_ACTIVATION_FUNCTION[n_layers]; } su2double ComputeX(size_t iLayer, size_t iNeuron){ @@ -164,13 +120,13 @@ class NeuralNetwork x = total_layers[iLayer]->getBias(iNeuron); size_t nNeurons_previous = total_layers[iLayer - 1]->getNNeurons(); for(size_t jNeuron=0; jNeurongetValue(jNeuron); + x += weights_mat[iLayer - 1][iNeuron][jNeuron] * total_layers[iLayer-1]->getOutput(jNeuron); } return x; } - // su2double 
run_activation_function(unsigned long iLayer, su2double x){return activation_functions[iLayer]->run_activation_function(x);} - //void SetActivationFunction(size_t iLayer, unsigned long function_index){activation_functions[iLayer] = ENUM_ACTIVATION_FUNCTION(function_index);} }; +} + diff --git a/Common/include/toolboxes/multilayer_perceptron/CNeuron.hpp b/Common/include/toolboxes/multilayer_perceptron/CNeuron.hpp new file mode 100644 index 000000000000..fed140d4457e --- /dev/null +++ b/Common/include/toolboxes/multilayer_perceptron/CNeuron.hpp @@ -0,0 +1,113 @@ +/*! + * \file CNeuron.hpp + * \brief Declaration of artificial neural network perceptron class + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . 
+ */ +#pragma once + +#include +#include +#include +#include + +#include "../../CConfig.hpp" +#include "../../linear_algebra/blas_structure.hpp" + +using namespace std; +namespace MLPToolbox{ +class CNeuron +{ +private: + unsigned long i_neuron; /*!< Neuron identification number */ + su2double output{0}, /*!< Output value of the current neuron */ + input{0}, /*!< Input value of the current neuron */ + doutput_dinput{0}, /*!< Gradient of output with respect to input */ + bias{0}; /*!< Bias value at current neuron */ + +public: + CNeuron(){}; + + /*! + * \brief Set neuron identification number + * \param[in] input - Identification number + */ + void setNumber(unsigned long input){i_neuron = input;} + + /*! + * \brief Get neuron identification number + * \return Identification number + */ + unsigned long getNumber() const {return i_neuron;} + + /*! + * \brief Set neuron output value + * \param[in] input - activation function output value + */ + void setOutput(su2double input){output = input;} + + /*! + * \brief Get neuron output value + * \return Output value + */ + su2double getOutput() const {return output;} + + /*! + * \brief Set neuron input value + * \param[in] input - activation function input value + */ + void setInput(su2double x){input = x;} + + /*! + * \brief Get neuron input value + * \return input value + */ + su2double getInput() const {return input;} + + /*! + * \brief Set neuron bias + * \param[in] input - bias value + */ + void setBias(su2double input){bias = input;} + + /*! + * \brief Get neuron bias value + * \return bias value + */ + su2double getBias() const {return bias;} + + /*! + * \brief Set neuron output gradient with respect to its input value + * \param[in] input - Derivative of activation function with respect to input + */ + void setGradient(su2double input){doutput_dinput = input;} + + /*! 
+ * \brief Get neuron output gradient with respect to input value + * \return output gradient wrt input value + */ + su2double getGradient() const {return doutput_dinput;} +}; + +} + diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp b/Common/include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp similarity index 90% rename from SU2_CFD/include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp rename to Common/include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp index e33d267cbe20..d471e42d9399 100644 --- a/SU2_CFD/include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp @@ -1,5 +1,5 @@ /*! - * \file ReadNeuralNetwork.hpp + * \file CReadNeuralNetwork.hpp * \brief Declaration of MLP input file reader class * \author E. Bunschoten * \version 7.4.0 "Blackbird" @@ -31,11 +31,12 @@ #include #include -#include "../../../../Common/include/CConfig.hpp" -#include "../../../../Common/include/linear_algebra/blas_structure.hpp" +#include "../../CConfig.hpp" +#include "../../linear_algebra/blas_structure.hpp" using namespace std; -class ReadNeuralNetwork { +namespace MLPToolbox{ +class CReadNeuralNetwork { private: vector input_names; vector output_names; @@ -51,7 +52,7 @@ class ReadNeuralNetwork { vector> output_norm; public: - ReadNeuralNetwork(string filename_in); + CReadNeuralNetwork(string filename_in); void ReadMLPFile(); string SkipToFlag(ifstream *file_stream, string flag); @@ -70,5 +71,6 @@ class ReadNeuralNetwork { string GetInputName(size_t iInput){return input_names.at(iInput);} string GetOutputName(size_t iOutput){return output_names.at(iOutput);} - ~ReadNeuralNetwork(){}; -}; \ No newline at end of file + ~CReadNeuralNetwork(){}; +}; +} diff --git a/Common/lib/Makefile.am b/Common/lib/Makefile.am index af2046cf826c..c938e9319666 100644 --- a/Common/lib/Makefile.am +++ b/Common/lib/Makefile.am @@ -128,6 +128,12 @@ lib_sources = \ 
../src/toolboxes/MMS/CRinglebSolution.cpp \ ../src/toolboxes/MMS/CTGVSolution.cpp \ ../src/toolboxes/MMS/CUserDefinedSolution.cpp \ + ../src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp\ + ../src/toolboxes/multilayer_perceptron/CIOMap.cpp\ + ../src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp\ + ../src/toolboxes/multilayer_perceptron/CLayer.cpp\ + ../src/toolboxes/multilayer_perceptron/CNeuron.cpp\ + ../src/toolboxes/multilayer_perceptron/CReadNeuralNetwork.cpp\ ../src/linear_algebra/CSysVector.cpp \ ../src/linear_algebra/CSysMatrix.cpp \ ../src/linear_algebra/CSysSolve.cpp \ diff --git a/Common/src/toolboxes/meson.build b/Common/src/toolboxes/meson.build index d95ffad59924..d6441390a579 100644 --- a/Common/src/toolboxes/meson.build +++ b/Common/src/toolboxes/meson.build @@ -5,3 +5,11 @@ common_src += files(['CLinearPartitioner.cpp', 'CSymmetricMatrix.cpp']) subdir('MMS') + +common_src += files(['multilayer_perceptron/CLookUp_ANN.cpp', + 'multilayer_perceptron/CIOMap.cpp', + 'multilayer_perceptron/CNeuralNetwork.cpp', + 'multilayer_perceptron/CLayer.cpp', + 'multilayer_perceptron/CNeuron.cpp', + 'multilayer_perceptron/CReadNeuralNetwork.cpp']) + diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/IOMap.cpp b/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp similarity index 82% rename from SU2_CFD/src/numerics/multilayer_perceptron/IOMap.cpp rename to Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp index 82d2faf19657..d6bba0ce8b25 100644 --- a/SU2_CFD/src/numerics/multilayer_perceptron/IOMap.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp @@ -1,5 +1,5 @@ /*! - * \file IOMap.cpp + * \file CIOMap.cpp * \brief Implementation of the input-output mapping class for the * use of multi-layer perceptrons in SU2 * \author E. Bunschoten @@ -25,14 +25,14 @@ * You should have received a copy of the GNU Lesser General Public * License along with SU2. If not, see . 
*/ -#include "../../../include/numerics/multilayer_perceptron/LookUp_ANN.hpp" -#include "../../../include/numerics/multilayer_perceptron/IOMap.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CIOMap.hpp" #include #include #include #include -IOMap::IOMap(LookUp_ANN*ANN_collection, vector &inputs, vector &outputs){ +MLPToolbox::CIOMap::CIOMap(CLookUp_ANN*ANN_collection, vector &inputs, vector &outputs){ PairVariableswithANNs(ANN_collection, inputs, outputs); if(outputs.size() > 0){ ANN_collection->Check_Use_of_Inputs(inputs, this); @@ -40,14 +40,13 @@ IOMap::IOMap(LookUp_ANN*ANN_collection, vector &inputs, vector & ANN_collection->Check_Duplicate_Outputs(outputs, this); } } -void IOMap::PairVariableswithANNs(LookUp_ANN*ANN_collection, vector &inputs, vector &outputs){ +void MLPToolbox::CIOMap::PairVariableswithANNs(CLookUp_ANN*ANN_collection, vector &inputs, vector &outputs){ - bool isInput, isOutput,input_match; + bool isInput, isOutput; for(size_t i_ANN=0; i_ANNGetNANNs(); i_ANN++){ vector> Input_Indices = ANN_collection->FindVariable_Indices(i_ANN, inputs, true); isInput = Input_Indices.size() > 0; if(isInput){ - input_match = true; vector> Output_Indices = ANN_collection->FindVariable_Indices(i_ANN, outputs, false); isOutput = Output_Indices.size() > 0; if(isOutput){ diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/Layer.cpp b/Common/src/toolboxes/multilayer_perceptron/CLayer.cpp similarity index 67% rename from SU2_CFD/src/numerics/multilayer_perceptron/Layer.cpp rename to Common/src/toolboxes/multilayer_perceptron/CLayer.cpp index b666e4547d34..74f1e702063f 100644 --- a/SU2_CFD/src/numerics/multilayer_perceptron/Layer.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CLayer.cpp @@ -1,5 +1,5 @@ /*! - * \file Layer.cpp + * \file CLayer.cpp * \brief Implementation of the Layer class to be used in the NeuralNetwork * class * \author E. 
Bunschoten @@ -25,38 +25,26 @@ * You should have received a copy of the GNU Lesser General Public * License along with SU2. If not, see . */ -#include "../../../include/numerics/multilayer_perceptron/Layer.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CLayer.hpp" #include using namespace std; -Layer::Layer() : Layer(1) {}; +MLPToolbox::CLayer::CLayer() : CLayer(1) {}; -Layer::Layer(unsigned long n_neurons) : number_of_neurons{n_neurons}, input{false} +MLPToolbox::CLayer::CLayer(unsigned long n_neurons) : number_of_neurons{n_neurons}, is_input{false} { - neurons = new Neuron[n_neurons]; + neurons = new CNeuron[n_neurons]; for(size_t i=0; i. */ -#include "../../../include/numerics/multilayer_perceptron/LookUp_ANN.hpp" -#include "../../../include/numerics/multilayer_perceptron/IOMap.hpp" -#include "../../../include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CIOMap.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp" #include #include #include @@ -35,7 +35,7 @@ using namespace std; -LookUp_ANN::LookUp_ANN(string inputFileName) +MLPToolbox::CLookUp_ANN::CLookUp_ANN(string inputFileName) { #ifdef HAVE_MPI @@ -47,7 +47,7 @@ LookUp_ANN::LookUp_ANN(string inputFileName) ReadANNInputFile(inputFileName); for(size_t i_ANN=0; i_ANN> LookUp_ANN::FindVariable_Indices(size_t i_ANN, vector variable_names, bool input) const { +vector> MLPToolbox::CLookUp_ANN::FindVariable_Indices(size_t i_ANN, vector variable_names, bool input) const { vector> variable_indices; size_t nVar = input ? 
NeuralNetworks[i_ANN]->GetnInputs() : NeuralNetworks[i_ANN]->GetnOutputs(); @@ -72,7 +72,7 @@ vector> LookUp_ANN::FindVariable_Indices(size_t i_ANN, vect return variable_indices; } -void LookUp_ANN::Predict_ANN(IOMap *input_output_map, vector& inputs, vector& outputs){ +void MLPToolbox::CLookUp_ANN::Predict_ANN(CIOMap *input_output_map, vector& inputs, vector& outputs){ for(size_t i_map=0; i_mapGetNANNs(); i_map++){ size_t i_ANN = input_output_map->GetANNIndex(i_map); vector ANN_inputs = input_output_map->GetANN_Inputs(i_map, inputs); @@ -82,9 +82,9 @@ void LookUp_ANN::Predict_ANN(IOMap *input_output_map, vector& inputs, } } } -void LookUp_ANN::GenerateANN(NeuralNetwork * ANN, string fileName) +void MLPToolbox::CLookUp_ANN::GenerateANN(CNeuralNetwork * ANN, string fileName) { - ReadNeuralNetwork Reader = ReadNeuralNetwork(fileName); + CReadNeuralNetwork Reader = CReadNeuralNetwork(fileName); // Read MLP input file Reader.ReadMLPFile(); @@ -136,7 +136,8 @@ void LookUp_ANN::GenerateANN(NeuralNetwork * ANN, string fileName) } } -void LookUp_ANN::ReadANNInputFile(string inputFileName) + +void MLPToolbox::CLookUp_ANN::ReadANNInputFile(string inputFileName) { ifstream file_stream; istringstream stream_names_var; @@ -165,7 +166,7 @@ void LookUp_ANN::ReadANNInputFile(string inputFileName) } ANN_filenames.pop_back(); } -string LookUp_ANN::SkipToFlag(ifstream *file_stream, string flag) { +string MLPToolbox::CLookUp_ANN::SkipToFlag(ifstream *file_stream, string flag) { string line; getline(*file_stream, line); @@ -180,7 +181,7 @@ string LookUp_ANN::SkipToFlag(ifstream *file_stream, string flag) { } -bool LookUp_ANN::Check_Duplicate_Outputs(vector &output_names, IOMap *input_output_map) const { +bool MLPToolbox::CLookUp_ANN::Check_Duplicate_Outputs(vector &output_names, CIOMap *input_output_map) const { unsigned short n_occurances; bool duplicate{false}; vector duplicate_variables; @@ -208,14 +209,12 @@ bool LookUp_ANN::Check_Duplicate_Outputs(vector &output_names, IOMap *in } 
-bool LookUp_ANN::Check_Use_of_Inputs(vector &input_names, IOMap *input_output_map) const { -unsigned short n_inputs = input_names.size(); +bool MLPToolbox::CLookUp_ANN::Check_Use_of_Inputs(vector &input_names, CIOMap *input_output_map) const { vector missing_inputs; bool inputs_are_present{true}; for(size_t iInput=0; iInputGetNANNs(); i_map++){ - size_t i_ANN = input_output_map->GetANNIndex(i_map); vector> input_map = input_output_map->GetInputMapping(i_map); for(size_t jInput=0; jInput 0){ return inputs_are_present; } -bool LookUp_ANN::Check_Use_of_Outputs(vector &output_names, IOMap * input_output_map) const { +bool MLPToolbox::CLookUp_ANN::Check_Use_of_Outputs(vector &output_names, CIOMap * input_output_map) const { /* Check wether all output variables are in the loaded MLPs */ vector missing_outputs; @@ -248,7 +247,6 @@ bool LookUp_ANN::Check_Use_of_Outputs(vector &output_names, IOMap * inpu /* Looping over all the selected ANNs */ for(size_t i_map=0; i_map < input_output_map->GetNANNs(); i_map++){ - size_t i_ANN = input_output_map->GetANNIndex(i_map); vector> output_map = input_output_map->GetOutputMapping(i_map); /* Looping over the outputs of the output map of the current ANN */ diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/NeuralNetwork.cpp b/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp similarity index 77% rename from SU2_CFD/src/numerics/multilayer_perceptron/NeuralNetwork.cpp rename to Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp index 2ece6768c590..b43446626135 100644 --- a/SU2_CFD/src/numerics/multilayer_perceptron/NeuralNetwork.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp @@ -1,5 +1,5 @@ /*! - * \file NeuralNetwork.cpp + * \file CNeuralNetwork.cpp * \brief Implementation of the NeuralNetwork class to be used * for evaluation of multi-layer perceptrons. * \author E. 
Bunschoten @@ -25,63 +25,65 @@ * You should have received a copy of the GNU Lesser General Public * License along with SU2. If not, see . */ -#include "../../../include/numerics/multilayer_perceptron/NeuralNetwork.hpp" -#include "../../../include/numerics/multilayer_perceptron/Layer.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CLayer.hpp" #include -#include "../../../include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp" -#include "../../../Common/include/toolboxes/geometry_toolbox.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp" using namespace std; -void NeuralNetwork::predict(vector &inputs){ +void MLPToolbox::CNeuralNetwork::predict(vector &inputs){ su2double x, x_norm, y, y_norm, tanx; size_t iNeuron, jNeuron, iLayer, nNeurons_current, nNeurons_previous; bool same_point{true}; for(iNeuron=0; iNeurongetNNeurons(); iNeuron++){ x_norm = (inputs[iNeuron] - input_norm[iNeuron].first)/(input_norm[iNeuron].second - input_norm[iNeuron].first); - if(abs(x_norm - inputLayer->getValue(iNeuron)) > 0) same_point = false; - inputLayer->setValue(iNeuron, x_norm); + if(abs(x_norm - inputLayer->getOutput(iNeuron)) > 0) same_point = false; + inputLayer->setOutput(iNeuron, x_norm); } if(!same_point){ for(iLayer=1; iLayergetNNeurons(); nNeurons_previous = total_layers[iLayer - 1]->getNNeurons(); - + for(iNeuron=0; iNeuronsetInput(iNeuron, x); + } switch (activation_function_types[iLayer]) { case ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE: for(iNeuron=0; iNeurongetInput(iNeuron); y = x > 0 ? x : tanh(x); - total_layers[iLayer]->setValue(iNeuron, y); + total_layers[iLayer]->setOutput(iNeuron, y); } break; case ENUM_ACTIVATION_FUNCTION::ELU: for(iNeuron=0; iNeurongetInput(iNeuron); y = x > 0 ? 
x : (exp(x) - 1); - total_layers[iLayer]->setValue(iNeuron, y); + total_layers[iLayer]->setOutput(iNeuron, y); } break; case ENUM_ACTIVATION_FUNCTION::LINEAR: for(iNeuron=0; iNeurongetInput(iNeuron); y = x; - total_layers[iLayer]->setValue(iNeuron, y); + total_layers[iLayer]->setOutput(iNeuron, y); } break; case ENUM_ACTIVATION_FUNCTION::RELU: for(iNeuron=0; iNeurongetInput(iNeuron); y = x > 0.0 ? x : 0.0; - total_layers[iLayer]->setValue(iNeuron, y); + total_layers[iLayer]->setOutput(iNeuron, y); } break; case ENUM_ACTIVATION_FUNCTION::NONE: for(iNeuron=0; iNeuronsetValue(iNeuron, y); + total_layers[iLayer]->setOutput(iNeuron, y); } break; default: @@ -91,7 +93,7 @@ void NeuralNetwork::predict(vector &inputs){ } } for(iNeuron=0; iNeurongetNNeurons(); iNeuron++){ - y_norm = outputLayer->getValue(iNeuron); + y_norm = outputLayer->getOutput(iNeuron); y = y_norm*(output_norm[iNeuron].second - output_norm[iNeuron].first) + output_norm[iNeuron].first; // Setting output value @@ -99,38 +101,38 @@ void NeuralNetwork::predict(vector &inputs){ } } -NeuralNetwork::NeuralNetwork(){ +MLPToolbox::CNeuralNetwork::CNeuralNetwork(){ inputLayer = nullptr; outputLayer = nullptr; n_hidden_layers = 0; } -void NeuralNetwork::defineInputLayer(unsigned long n_neurons){ +void MLPToolbox::CNeuralNetwork::defineInputLayer(unsigned long n_neurons){ //cout << "Creating an input layer with " << n_neurons << " neurons. "<< endl; - inputLayer = new Layer(n_neurons); + inputLayer = new CLayer(n_neurons); inputLayer->setInput(true); input_norm.resize(n_neurons); } -void NeuralNetwork::defineOutputLayer(unsigned long n_neurons){ +void MLPToolbox::CNeuralNetwork::defineOutputLayer(unsigned long n_neurons){ //cout << "Creating an output layer with " << n_neurons << " neurons. 
"<< endl; - outputLayer = new Layer(n_neurons); + outputLayer = new CLayer(n_neurons); output_norm.resize(n_neurons); } -void NeuralNetwork::push_hidden_layer(unsigned long n_neurons){ - Layer *newLayer = new Layer(n_neurons); +void MLPToolbox::CNeuralNetwork::push_hidden_layer(unsigned long n_neurons){ + CLayer *newLayer = new CLayer(n_neurons); //cout << "Creating a hidden layer with " << n_neurons << " neurons. "<< endl; hiddenLayers.push_back(newLayer); n_hidden_layers ++; } -void NeuralNetwork::sizeWeights(){ +void MLPToolbox::CNeuralNetwork::sizeWeights(){ unsigned long i_layer{0}; weights.resize(n_hidden_layers + 1); weights.at(i_layer).resize(inputLayer->getNNeurons()); - Layer * previouslayer = inputLayer; + CLayer * previouslayer = inputLayer; if(n_hidden_layers != 0){ for(size_t i_hidden_layer=0; i_hidden_layer < n_hidden_layers; i_hidden_layer++){ @@ -163,19 +165,16 @@ void NeuralNetwork::sizeWeights(){ ANN_outputs = new su2double[outputLayer->getNNeurons()]; } -void NeuralNetwork::setWeight(unsigned long i_layer, unsigned long i_neuron, unsigned long j_neuron, su2double value){ +void MLPToolbox::CNeuralNetwork::setWeight(unsigned long i_layer, unsigned long i_neuron, unsigned long j_neuron, su2double value){ //weights.at(i_layer).at(i_neuron).at(j_neuron) = value; weights_mat[i_layer][j_neuron][i_neuron] = value; } -void NeuralNetwork::displayNetwork(){ +void MLPToolbox::CNeuralNetwork::displayNetwork(){ cout << "Input layer: " << inputLayer->getNNeurons() << " neurons, Activation Function: " << inputLayer->getActivationType() << endl; for(size_t i=0; igetNNeurons(); i++){ for(size_t j=0; jgetNNeurons(); j++){ cout << weights_mat[0][i][j] << " "; } - su2double thingy[] = {1, 1, 1}; - cout << GeometryToolbox::DotProduct(inputLayer->getNNeurons(), weights_mat[0][i], thingy) << endl; - cout << endl; } for(size_t i_layer=0; i_layer < hiddenLayers.size(); i_layer++){ cout << "Hidden layer " << i_layer + 1 << ": " << hiddenLayers.at(i_layer)->getNNeurons() 
<< " neurons, Activation Function: " << hiddenLayers.at(i_layer) ->getActivationType() << endl; @@ -189,7 +188,7 @@ void NeuralNetwork::displayNetwork(){ cout << "Output layer: "<getNNeurons() <<" neurons, Activation Function: " << outputLayer->getActivationType() << endl; } -void NeuralNetwork::setActivationFunction(unsigned long i_layer, string input) +void MLPToolbox::CNeuralNetwork::setActivationFunction(unsigned long i_layer, string input) { if(input.compare("linear") == 0){ //activation_functions[i_layer] = new Linear(); diff --git a/Common/src/toolboxes/multilayer_perceptron/CNeuron.cpp b/Common/src/toolboxes/multilayer_perceptron/CNeuron.cpp new file mode 100644 index 000000000000..f52bdb007f2b --- /dev/null +++ b/Common/src/toolboxes/multilayer_perceptron/CNeuron.cpp @@ -0,0 +1,33 @@ +/*! + * \file CNeuron.cpp + * \brief Implementation of the neuron class to be used within the + * Layer class as a part of the NeuralNetwork class. + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . 
+ */ +#include "../../../include/toolboxes/multilayer_perceptron/CNeuron.hpp" +#include +#include +#include + +using namespace std; \ No newline at end of file diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp b/Common/src/toolboxes/multilayer_perceptron/CReadNeuralNetwork.cpp similarity index 71% rename from SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp rename to Common/src/toolboxes/multilayer_perceptron/CReadNeuralNetwork.cpp index 13d0b64a14fe..8c34674e2605 100644 --- a/SU2_CFD/src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CReadNeuralNetwork.cpp @@ -25,18 +25,18 @@ * You should have received a copy of the GNU Lesser General Public * License along with SU2. If not, see . */ -#include "../../../include/numerics/multilayer_perceptron/ReadNeuralNetwork.hpp" +#include "../../../include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp" using namespace std; -ReadNeuralNetwork::ReadNeuralNetwork(string filename_in){ +MLPToolbox::CReadNeuralNetwork::CReadNeuralNetwork(string filename_in){ filename = filename_in; }; -void ReadNeuralNetwork::ReadMLPFile(){ +void MLPToolbox::CReadNeuralNetwork::ReadMLPFile(){ ifstream file_stream; file_stream.open(filename.c_str(), ifstream::in); if (!file_stream.is_open()) { - SU2_MPI::Error(string("There is no MLP file file called ") + filename, + SU2_MPI::Error(string("There is no MLP file called ") + filename, CURRENT_FUNCTION); } @@ -44,9 +44,13 @@ void ReadNeuralNetwork::ReadMLPFile(){ double input_min, input_max, output_min, output_max; bool eoHeader{false}, found_layercount{false}, found_input_names{false}, found_output_names{false}; + /* Read general architecture information from file header */ + line = SkipToFlag(&file_stream, "
"); while(getline(file_stream, line) && !eoHeader){ + + /* Read layer count */ if(line.compare("[number of layers]") == 0){ getline(file_stream, line); n_layers = stoul(line); @@ -58,7 +62,10 @@ void ReadNeuralNetwork::ReadMLPFile(){ found_layercount = true; } + /* Set number of neurons for each layer */ if(line.compare("[neurons per layer]") == 0){ + + /* In case layer count was not yet provided, return an error */ if(!found_layercount){ SU2_MPI::Error("No layer count provided before defining neuron count per layer", CURRENT_FUNCTION); } @@ -86,6 +93,7 @@ void ReadNeuralNetwork::ReadMLPFile(){ for(size_t iNeuron=0; iNeuron> word; - // activation_functions.at(iLayer) = word; - // } - - // line = SkipToFlag(&file_stream, "[input names]"); - // for(size_t iInput=0; iInput < n_neurons.at(0); iInput++){ - // getline(file_stream, line); - // input_names.push_back(line); - // } - - // line = SkipToFlag(&file_stream, "[input normalization]"); - // for(size_t iInput=0; iInput> word; - // input_min = stold(word); - // input_norm_stream >> word; - // input_max = stold(word); - // input_norm.at(iInput) = make_pair(input_min, input_max); - // } - - // line = SkipToFlag(&file_stream, "[output names]"); - // for(size_t iOutput=0; iOutput < n_neurons.at(n_neurons.size()-1); iOutput++){ - // getline(file_stream, line); - // output_names.push_back(line); - // } - - // line = SkipToFlag(&file_stream, "[output normalization]"); - // for(size_t iOutput=0; iOutput> word; - // output_min = stold(word); - // output_norm_stream >> word; - // output_max = stold(word); - // output_norm.at(iOutput) = make_pair(output_min, output_max); - // } - + /* Read weights for each layer */ line = SkipToFlag(&file_stream, "[weights per layer]"); for(size_t iLayer=0; iLayer. + */ + +#pragma once + +#include "CFluidModel.hpp" +#include "../../../Common/include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp" +/*! 
+ * \class CMLPGas_Template + * \brief Template class for fluid model definition using multi-layer perceptrons for + * fluid dynamic state definition. + * \author: E.Bunschoten. + */ +class CMLPGas_Template : public CFluidModel { + protected: + su2double Gamma{0.0}; /*!< \brief Ratio of Specific Heats. */ + su2double Gamma_Minus_One{0.0}; /*!< \brief Ratio of Specific Heats Minus One. */ + su2double Gas_Constant{0.0}; /*!< \brief Gas Constant. */ + bool ComputeEntropy{true}; /*!< \brief Whether or not to compute entropy. */ + + string ann_input_filename; + MLPToolbox::CLookUp_ANN * lookup_ann; + vector mlp_input_names, lookup_names; + vector lookup_data; + + MLPToolbox::CIOMap * iomap_rhoe; + vector input_names_rhoe, + output_names_rhoe; + vector outputs_rhoe; + + MLPToolbox::CIOMap * iomap_PT; + vector input_names_PT, + output_names_PT; + vector outputs_PT; + + MLPToolbox::CIOMap * iomap_Prho; + vector input_names_Prho, + output_names_Prho; + vector outputs_Prho; + + void MapInputs_to_Outputs(); + + public: + /*! + * \brief Constructor of the class. + */ + CMLPGas_Template(su2double gamma, su2double R, bool CompEntropy = true); + + ~CMLPGas_Template(); + /*! + * \brief Set the Dimensionless State using Density and Internal Energy + * \param[in] rho - first thermodynamic variable. + * \param[in] e - second thermodynamic variable. + */ + void SetTDState_rhoe(su2double rho, su2double e) override; + + /*! + * \brief Set the Dimensionless State using Pressure and Temperature + * \param[in] P - first thermodynamic variable. + * \param[in] T - second thermodynamic variable. + */ + void SetTDState_PT(su2double P, su2double T) override; + + /*! + * \brief Set the Dimensionless State using Pressure and Density + * \param[in] P - first thermodynamic variable. + * \param[in] rho - second thermodynamic variable. + */ + void SetTDState_Prho(su2double P, su2double rho) override; + + /*! 
+ * \brief Set the Dimensionless Internal Energy using Pressure and Density + * \param[in] P - first thermodynamic variable. + * \param[in] rho - second thermodynamic variable. + */ + void SetEnergy_Prho(su2double P, su2double rho) override; +}; diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp b/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp deleted file mode 100644 index 2e2f14b1d757..000000000000 --- a/SU2_CFD/include/numerics/multilayer_perceptron/IOMap.hpp +++ /dev/null @@ -1,83 +0,0 @@ -/*! - * \file IOMap.hpp - * \brief Declaration of input-to-output mapping class for artifical neural networks - * \author E. Bunschoten - * \version 7.4.0 "Blackbird" - * - * SU2 Project Website: https://su2code.github.io - * - * The SU2 Project is maintained by the SU2 Foundation - * (http://su2foundation.org) - * - * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) - * - * SU2 is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * SU2 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with SU2. If not, see . 
- */ -#pragma once - -#include -#include -#include -#include -#include -#include -#include "../../../../Common/include/CConfig.hpp" -#include "../../../../Common/include/linear_algebra/blas_structure.hpp" -#include "LookUp_ANN.hpp" - -using namespace std; -class LookUp_ANN; - -class IOMap -{ - private: - vector ANN_indices; - vector>> Input_Map; - vector>> Output_Map; - vector> input_indices; - public: - IOMap(){}; - IOMap(LookUp_ANN*ANN_collection, vector &inputs, vector &outputs); - ~IOMap(){}; - void Push_ANN_index(size_t i_ANN){ANN_indices.push_back(i_ANN);}; - void PairVariableswithANNs(LookUp_ANN * ANN_collection, vector &inputs, vector &outputs); - size_t GetNANNs(){return ANN_indices.size();} - size_t GetANNIndex(size_t i_Map){return ANN_indices[i_Map];} - size_t GetInputIndex(size_t i_Map, size_t iInput){return Input_Map[i_Map][iInput].first;} - size_t GetOutputIndex(size_t i_Map, size_t iOutput){return Output_Map[i_Map][iOutput].first;} - size_t GetANNOutputIndex(size_t i_Map, size_t iOutput){return Output_Map[i_Map][iOutput].second;} - size_t GetNMappedOutputs(size_t i_Map){return Output_Map[i_Map].size();} - - vector> GetOutputMapping(size_t i_map){return Output_Map[i_map];} - vector> GetInputMapping(size_t i_map){return Input_Map[i_map];} - - vector GetANN_Inputs(size_t i_Map, vector&inputs){ - vector ANN_input; - ANN_input.resize(Input_Map[i_Map].size()); - - for(size_t iInput=0; iInput GetANN_Outputs(size_t i_Map, vector&outputs){ - vector ANN_output; - ANN_output.resize(Output_Map[i_Map].size()); - - for(size_t iOutput=0; iOutput. 
- */ -#pragma once - -#include -#include -#include -#include - -#include "../../../../Common/include/CConfig.hpp" -#include "Neuron.hpp" -#include "../../../../Common/include/linear_algebra/blas_structure.hpp" -using namespace std; - -class Layer -{ -private: - unsigned long number_of_neurons; - Neuron * neurons; - bool input; - string activation_type; -public: - Layer(); - Layer(unsigned long n_neurons); - void setNNeurons(unsigned long n_neurons); - unsigned long getNNeurons(){return number_of_neurons;}; - void sayhi(); - void setInput(bool def){input = def;}; - bool isInput(){return input;}; - void setValue(size_t i_neuron, su2double value){neurons[i_neuron].setValue(value);} - su2double getValue(size_t i_neuron){return neurons[i_neuron].getValue();} - void setBias(size_t i_neuron, su2double value){neurons[i_neuron].setBias(value);} - su2double getBias(size_t i_neuron){return neurons[i_neuron].getBias();} - su2double getdYdX(size_t i_neuron){return neurons[i_neuron].getGradient();} - string getActivationType(){return activation_type;} - ~Layer(){ - delete [] neurons; - }; -}; diff --git a/SU2_CFD/include/numerics/multilayer_perceptron/Neuron.hpp b/SU2_CFD/include/numerics/multilayer_perceptron/Neuron.hpp deleted file mode 100644 index 12e1012b09a1..000000000000 --- a/SU2_CFD/include/numerics/multilayer_perceptron/Neuron.hpp +++ /dev/null @@ -1,74 +0,0 @@ -/*! - * \file LookUp_ANN.hpp - * \brief Declaration of artificial neural network perceptron class - * \author E. Bunschoten - * \version 7.4.0 "Blackbird" - * - * SU2 Project Website: https://su2code.github.io - * - * The SU2 Project is maintained by the SU2 Foundation - * (http://su2foundation.org) - * - * Copyright 2012-2022, SU2 Contributors (cf. 
AUTHORS.md) - * - * SU2 is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * SU2 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with SU2. If not, see . - */ -#pragma once - -#include -#include -#include -#include - -#include "../../../../Common/include/CConfig.hpp" -#include "../../../../Common/include/linear_algebra/blas_structure.hpp" - -using namespace std; -class Neuron -{ -private: - string activation_type="SmoothSlope"; - unsigned long ActivationFunction{ENUM_ACTIVATION_FUNCTION::LINEAR}; - unsigned long i_neuron; - double value; - su2double dy_dx; - double bias{0}; - enum ENUM_ACTIVATION_FUNCTION { - NONE=0, - LINEAR=1, - RELU=2, - SMOOTH_SLOPE=3 - }; - -public: - Neuron(){}; - void setNumber(unsigned long input){i_neuron = input;} - unsigned long getNumber(){return i_neuron;} - //void setFunctionType(string input); - - //string getFunctionType(){return activation_type;} - //su2double activation_function(su2double x); - void setValue(su2double input){value = input;} - su2double getValue(){return value;} - - void setBias(su2double input){bias = input;} - su2double getBias(){return bias;} - - su2double getGradient(){return dy_dx;} - void setGradient(su2double input){dy_dx = input;} - //void SetActivationFunction(unsigned long input){ActivationFunction = input;} - - ~Neuron(){}; -}; - diff --git a/SU2_CFD/obj/Makefile.am b/SU2_CFD/obj/Makefile.am index 140433ec93a2..361d69d5af9b 100644 --- a/SU2_CFD/obj/Makefile.am +++ b/SU2_CFD/obj/Makefile.am @@ -57,6 +57,7 @@ libSU2Core_sources = 
../src/definition_structure.cpp \ ../src/fluid/CNEMOGas.cpp \ ../src/fluid/CSU2TCLib.cpp \ ../src/fluid/CMutationTCLib.cpp \ + ../src/fluid/CMLPGas.cpp \ ../src/integration/CIntegration.cpp \ ../src/integration/CSingleGridIntegration.cpp \ ../src/integration/CMultiGridIntegration.cpp \ @@ -116,12 +117,6 @@ libSU2Core_sources = ../src/definition_structure.cpp \ ../src/numerics/elasticity/CFEALinearElasticity.cpp \ ../src/numerics/elasticity/CFEANonlinearElasticity.cpp \ ../src/numerics/elasticity/nonlinear_models.cpp \ - ../src/numerics/multilayer_perceptron/LookUp_ANN.cpp \ - ../src/numerics/multilayer_perceptron/IOMap.cpp \ - ../src/numerics/multilayer_perceptron/NeuralNetwork.cpp \ - ../src/numerics/multilayer_perceptron/Neuron.cpp \ - ../src/numerics/multilayer_perceptron/Layer.cpp \ - ../src/numerics/multilayer_perceptron/ReadNeuralNetwork.cpp \ ../include/numerics_simd/CNumericsSIMD.cpp \ ../src/output/filewriter/CCSVFileWriter.cpp \ ../src/output/filewriter/CSTLFileWriter.cpp \ diff --git a/SU2_CFD/src/fluid/CMLPGas_Template.cpp b/SU2_CFD/src/fluid/CMLPGas_Template.cpp new file mode 100644 index 000000000000..44e00c5ec8de --- /dev/null +++ b/SU2_CFD/src/fluid/CMLPGas_Template.cpp @@ -0,0 +1,125 @@ +/*! + * \file CMLPGas_Template.cpp + * \brief Source of the data-driven fluid model class using + * multilayer perceptrons for data regression + * \author E.Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . + */ + +#include "../../include/fluid/CMLPGas_Template.hpp" + +CMLPGas_Template::CMLPGas_Template(su2double gamma, su2double R, bool CompEntropy) : CFluidModel() { + Gamma = gamma; + Gamma_Minus_One = Gamma - 1.0; + Gas_Constant = R; + Cp = Gamma / Gamma_Minus_One * Gas_Constant; + Cv = Cp - R; + + ComputeEntropy = CompEntropy; + + /* Define a CLookUp_ANN object to allow for data regression */ + ann_input_filename = "MLP_collection.mlp"; + lookup_ann = new MLPToolbox::CLookUp_ANN(ann_input_filename); + + /* Map MLP inputs to outputs for each look-up operation*/ + MapInputs_to_Outputs(); +} + +CMLPGas_Template::~CMLPGas_Template(){ + delete iomap_rhoe; + delete iomap_PT; + delete iomap_Prho; + delete lookup_ann; +} +void CMLPGas_Template::MapInputs_to_Outputs(){ + + input_names_rhoe.push_back("rho"); + input_names_rhoe.push_back("e"); + + output_names_rhoe.push_back("Temperature"); + outputs_rhoe.push_back(&Temperature); + output_names_rhoe.push_back("Pressure"); + outputs_rhoe.push_back(&Pressure); + output_names_rhoe.push_back("SoundSpeed2"); + outputs_rhoe.push_back(&SoundSpeed2); + output_names_rhoe.push_back("dPdrho_e"); + outputs_rhoe.push_back(&dPdrho_e); + output_names_rhoe.push_back("dPde_rho"); + outputs_rhoe.push_back(&dPde_rho); + output_names_rhoe.push_back("dTdrho_e"); + outputs_rhoe.push_back(&dTdrho_e); + output_names_rhoe.push_back("dTde_rho"); + outputs_rhoe.push_back(&dTde_rho); + output_names_rhoe.push_back("Entropy"); + outputs_rhoe.push_back(&Entropy); + + iomap_rhoe = new MLPToolbox::CIOMap(lookup_ann, input_names_rhoe, output_names_rhoe); + + input_names_PT.push_back("P"); + 
input_names_PT.push_back("T"); + + output_names_PT.push_back("rho"); + outputs_PT.push_back(&Density); + output_names_PT.push_back("e"); + outputs_PT.push_back(&StaticEnergy); + + iomap_PT = new MLPToolbox::CIOMap(lookup_ann, input_names_PT, output_names_PT); + + input_names_Prho.push_back("P"); + input_names_Prho.push_back("rho"); + + output_names_Prho.push_back("e"); + outputs_Prho.push_back(&StaticEnergy); + iomap_Prho = new MLPToolbox::CIOMap(lookup_ann, input_names_Prho, output_names_Prho); +} + +void CMLPGas_Template::SetTDState_rhoe(su2double rho, su2double e) { + vector ann_inputs; + ann_inputs.push_back(rho); + ann_inputs.push_back(e); + lookup_ann->Predict_ANN(iomap_rhoe, ann_inputs, outputs_rhoe); +} + +void CMLPGas_Template::SetTDState_PT(su2double P, su2double T) { + vector ann_inputs; + ann_inputs.push_back(P); + ann_inputs.push_back(T); + lookup_ann->Predict_ANN(iomap_PT, ann_inputs, outputs_PT); + + SetTDState_rhoe(Density, StaticEnergy); +} + +void CMLPGas_Template::SetTDState_Prho(su2double P, su2double rho) { + vector ann_inputs; + ann_inputs.push_back(P); + ann_inputs.push_back(rho); + lookup_ann->Predict_ANN(iomap_Prho, ann_inputs, outputs_Prho); + SetTDState_rhoe(rho, StaticEnergy); +} + +void CMLPGas_Template::SetEnergy_Prho(su2double P, su2double rho) { + vector ann_inputs; + ann_inputs.push_back(P); + ann_inputs.push_back(rho); + lookup_ann->Predict_ANN(iomap_Prho, ann_inputs, outputs_Prho); +} diff --git a/SU2_CFD/src/meson.build b/SU2_CFD/src/meson.build index 6e730d80ffc0..e477f80b2b65 100644 --- a/SU2_CFD/src/meson.build +++ b/SU2_CFD/src/meson.build @@ -10,7 +10,8 @@ su2_cfd_src += files(['fluid/CFluidModel.cpp', 'fluid/CVanDerWaalsGas.cpp', 'fluid/CNEMOGas.cpp', 'fluid/CMutationTCLib.cpp', - 'fluid/CSU2TCLib.cpp']) + 'fluid/CSU2TCLib.cpp', + 'fluid/CMLPGas.cpp']) su2_cfd_src += files(['output/COutputFactory.cpp', 'output/CAdjElasticityOutput.cpp', @@ -147,13 +148,7 @@ su2_cfd_src += files(['numerics/CNumerics.cpp', 
'numerics/elasticity/CFEALinearElasticity.cpp', 'numerics/elasticity/CFEANonlinearElasticity.cpp', 'numerics/elasticity/nonlinear_models.cpp', - 'numerics/CGradSmoothing.cpp', - 'numerics/multilayer_perceptron/LookUp_ANN.cpp', - 'numerics/multilayer_perceptron/IOMap.cpp', - 'numerics/multilayer_perceptron/NeuralNetwork.cpp', - 'numerics/multilayer_perceptron/Layer.cpp', - 'numerics/multilayer_perceptron/Neuron.cpp', - 'numerics/multilayer_perceptron/ReadNeuralNetwork.cpp']) + 'numerics/CGradSmoothing.cpp']) su2_cfd_src += files(['../include/numerics_simd/CNumericsSIMD.cpp']) diff --git a/SU2_CFD/src/numerics/multilayer_perceptron/Neuron.cpp b/SU2_CFD/src/numerics/multilayer_perceptron/Neuron.cpp deleted file mode 100644 index 4fbef6a462e6..000000000000 --- a/SU2_CFD/src/numerics/multilayer_perceptron/Neuron.cpp +++ /dev/null @@ -1,114 +0,0 @@ -/*! - * \file Neuron.cpp - * \brief Implementation of the neuron class to be used within the - * Layer class as a part of the NeuralNetwork class. - * \author E. Bunschoten - * \version 7.4.0 "Blackbird" - * - * SU2 Project Website: https://su2code.github.io - * - * The SU2 Project is maintained by the SU2 Foundation - * (http://su2foundation.org) - * - * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) - * - * SU2 is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * SU2 is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with SU2. If not, see . 
- */ -#include "../../../include/numerics/multilayer_perceptron/Neuron.hpp" -#include -#include -#include - -using namespace std; -// Neuron::Neuron() -// { -// } - -// su2double Neuron::activation_function(su2double x) -// { -// x += bias; -// su2double tanx; -// switch(ActivationFunction){ -// case ENUM_ACTIVATION_FUNCTION::LINEAR: -// dy_dx = 1; -// return x; -// break; -// case ENUM_ACTIVATION_FUNCTION::RELU: -// if(x > 0.0){ -// dy_dx = 1.0; -// return x; -// }else{ -// dy_dx = 0.0; -// return 0.0; -// } -// break; -// case ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE: -// tanx = tanh(x); -// if(tanx > x){ -// dy_dx = 4.0 / pow((exp(-x) + exp(x)), 2); -// return tanx; -// }else{ -// dy_dx = 1.0; -// return x; -// } -// break; -// default: -// SU2_MPI::Error(string("Unknown activation function type: ")+activation_type, -// CURRENT_FUNCTION); -// return 0.0; -// break; -// } -// // if(activation_type.compare("smooth_slope") == 0){ -// // su2double tanx = tanh(x); -// // if(tanx > x){ -// // dy_dx = 4.0 / pow((exp(-x) + exp(x)), 2); -// // return tanx; -// // }else{ -// // dy_dx = 1.0; -// // return x; -// // } - -// // } -// // if(activation_type.compare("linear") == 0){ -// // dy_dx = 1.0; -// // return x; -// // } -// // if(activation_type.compare("relu") == 0){ -// // su2double zero{0.0}; -// // if(x > zero){ -// // dy_dx = 1.0; -// // return x; -// // }else{ -// // dy_dx = 0.0; -// // return zero; -// // } -// // } - -// // SU2_MPI::Error(string("Unknown activation function type: ")+activation_type, -// // CURRENT_FUNCTION); - -// // return x; -// } - -// void Neuron::setFunctionType(string input){ -// activation_type = input; - -// if(input.compare("smooth_slope") == 0) ActivationFunction = ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE; return; - -// if(input.compare("linear") == 0) ActivationFunction = ENUM_ACTIVATION_FUNCTION::LINEAR; return; - -// if(input.compare("relu") == 0) ActivationFunction = ENUM_ACTIVATION_FUNCTION::RELU; return; - -// 
if(input.compare("none") == 0) ActivationFunction = ENUM_ACTIVATION_FUNCTION::NONE; return; -// } \ No newline at end of file From 9e32ee0672289517ad7a3172b75e025b255eae44 Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Thu, 15 Sep 2022 14:14:51 +0200 Subject: [PATCH 04/84] fixed build bug --- SU2_CFD/obj/Makefile.am | 2 +- SU2_CFD/src/meson.build | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/SU2_CFD/obj/Makefile.am b/SU2_CFD/obj/Makefile.am index 361d69d5af9b..5ba91fd0d4e0 100644 --- a/SU2_CFD/obj/Makefile.am +++ b/SU2_CFD/obj/Makefile.am @@ -57,7 +57,7 @@ libSU2Core_sources = ../src/definition_structure.cpp \ ../src/fluid/CNEMOGas.cpp \ ../src/fluid/CSU2TCLib.cpp \ ../src/fluid/CMutationTCLib.cpp \ - ../src/fluid/CMLPGas.cpp \ + ../src/fluid/CMLPGas_Template.cpp \ ../src/integration/CIntegration.cpp \ ../src/integration/CSingleGridIntegration.cpp \ ../src/integration/CMultiGridIntegration.cpp \ diff --git a/SU2_CFD/src/meson.build b/SU2_CFD/src/meson.build index e477f80b2b65..9d81ec13f717 100644 --- a/SU2_CFD/src/meson.build +++ b/SU2_CFD/src/meson.build @@ -11,7 +11,7 @@ su2_cfd_src += files(['fluid/CFluidModel.cpp', 'fluid/CNEMOGas.cpp', 'fluid/CMutationTCLib.cpp', 'fluid/CSU2TCLib.cpp', - 'fluid/CMLPGas.cpp']) + 'fluid/CMLPGas_Template.cpp']) su2_cfd_src += files(['output/COutputFactory.cpp', 'output/CAdjElasticityOutput.cpp', From b233278084092bf9fe26077b6f829cc763e7d981 Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Thu, 15 Sep 2022 14:27:38 +0200 Subject: [PATCH 05/84] Fixed unused-variable error --- Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp b/Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp index 331e08c11c73..f63e57038a7b 100644 --- a/Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp @@ -188,7 +188,6 @@ bool 
MLPToolbox::CLookUp_ANN::Check_Duplicate_Outputs(vector &output_nam for(size_t i_Output =0; i_Output < output_names.size(); i_Output++){ n_occurances = 0; for(size_t i_map=0; i_mapGetNANNs(); i_map++){ - size_t i_ANN = input_output_map->GetANNIndex(i_map); vector> output_map = input_output_map->GetOutputMapping(i_map); for(size_t j_Output=0; j_Output Date: Thu, 15 Sep 2022 14:44:28 +0200 Subject: [PATCH 06/84] Fixed unused-variable error --- Common/src/toolboxes/multilayer_perceptron/CLayer.cpp | 2 +- .../src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Common/src/toolboxes/multilayer_perceptron/CLayer.cpp b/Common/src/toolboxes/multilayer_perceptron/CLayer.cpp index 74f1e702063f..b53a397fbd5e 100644 --- a/Common/src/toolboxes/multilayer_perceptron/CLayer.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CLayer.cpp @@ -29,7 +29,7 @@ #include using namespace std; -MLPToolbox::CLayer::CLayer() : CLayer(1) {}; +MLPToolbox::CLayer::CLayer() : CLayer(1) {} MLPToolbox::CLayer::CLayer(unsigned long n_neurons) : number_of_neurons{n_neurons}, is_input{false} { diff --git a/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp b/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp index b43446626135..496ac851048f 100644 --- a/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp @@ -33,8 +33,8 @@ using namespace std; void MLPToolbox::CNeuralNetwork::predict(vector &inputs){ - su2double x, x_norm, y, y_norm, tanx; - size_t iNeuron, jNeuron, iLayer, nNeurons_current, nNeurons_previous; + su2double x, x_norm, y, y_norm; + size_t iNeuron, iLayer, nNeurons_current; bool same_point{true}; for(iNeuron=0; iNeurongetNNeurons(); iNeuron++){ x_norm = (inputs[iNeuron] - input_norm[iNeuron].first)/(input_norm[iNeuron].second - input_norm[iNeuron].first); @@ -44,7 +44,6 @@ void 
MLPToolbox::CNeuralNetwork::predict(vector &inputs){ if(!same_point){ for(iLayer=1; iLayergetNNeurons(); - nNeurons_previous = total_layers[iLayer - 1]->getNNeurons(); for(iNeuron=0; iNeuron Date: Mon, 26 Sep 2022 10:59:15 +0200 Subject: [PATCH 07/84] Added more comments to CIOMap and CLookUp_ANN classes --- .../multilayer_perceptron/CIOMap.hpp | 171 ++++++++++++++---- .../multilayer_perceptron/CLayer.hpp | 17 +- .../multilayer_perceptron/CLookUp_ANN.hpp | 38 ++-- .../multilayer_perceptron/CNeuralNetwork.hpp | 52 +++--- .../multilayer_perceptron/CNeuron.hpp | 3 +- .../CReadNeuralNetwork.hpp | 43 +++-- .../multilayer_perceptron/CIOMap.cpp | 45 +++-- .../multilayer_perceptron/CLookUp_ANN.cpp | 18 +- .../multilayer_perceptron/CNeuralNetwork.cpp | 132 +++++++++----- SU2_CFD/src/fluid/CPengRobinson.cpp | 51 ++++++ 10 files changed, 395 insertions(+), 175 deletions(-) diff --git a/Common/include/toolboxes/multilayer_perceptron/CIOMap.hpp b/Common/include/toolboxes/multilayer_perceptron/CIOMap.hpp index f0ef33fefed1..8d0c97aa6960 100644 --- a/Common/include/toolboxes/multilayer_perceptron/CIOMap.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CIOMap.hpp @@ -32,56 +32,157 @@ #include #include #include -#include "../../CConfig.hpp" #include "../../linear_algebra/blas_structure.hpp" #include "CLookUp_ANN.hpp" -using namespace std; namespace MLPToolbox { class CLookUp_ANN; class CIOMap + /*! + *\class CIOMap + *\brief This class is used by the CLookUp_ANN class to assign user-defined inputs + * and outputs to loaded multi-layer perceptrons. When a look-up operation is called + * with a specific CIOMap, the multi-layer perceptrons are evaluated with input and + * output variables coinciding with the desired input and output variable names. + * + * + * For example, in a custom, data-driven fluid model, MLP's are used for thermodynamic state + * definition. There are three MLP's loaded. MLP_1 predicts temperature and specific heat + * based on density and energy. 
MLP_2 predicts pressure and speed of sound based on density and + * energy as well. MLP_3 predicts density and energy based on pressure and temperature. + * During a certain look-up operation in the CFluidModel, temperature, speed of sound and pressure + * are needed for a given density and energy. What the CIOMap does is to point to MLP_1 for + * temperature evalutation, and to MLP_2 for pressure and speed of sound evaluation. MLP_3 is + * not considered, as the respective inputs and outputs don't match with the function call + * inputs and outputs. + * + * call variables: MLP inputs: MLP outputs: call outputs: + * + * 2--> energy --| |--> temperature --> 1 + * |--> MLP_1 --| + * 1:density 1--> density --| |--> c_p 1:temperature + * 2:energy 2:speed of sound + * 1--> density --| |--> pressure --> 3 3:pressure + * |--> MLP_2 --| + * 2--> energy --| |--> speed of sound --> 2 + * + * pressure --| |--> density + * |--> MLP_3 --| + * temperature --| |--> energy + * + * + * \author E.Bunschoten + */ + { private: - vector ANN_indices; - vector>> Input_Map; - vector>> Output_Map; - vector> input_indices; - public: - CIOMap(){}; - CIOMap(CLookUp_ANN*ANN_collection, vector &inputs, vector &outputs); - ~CIOMap(){}; - void Push_ANN_index(size_t i_ANN){ANN_indices.push_back(i_ANN);}; - void PairVariableswithANNs(CLookUp_ANN * ANN_collection, vector &inputs, vector &outputs); - size_t GetNANNs(){return ANN_indices.size();} - size_t GetANNIndex(size_t i_Map){return ANN_indices[i_Map];} - size_t GetInputIndex(size_t i_Map, size_t iInput){return Input_Map[i_Map][iInput].first;} - size_t GetOutputIndex(size_t i_Map, size_t iOutput){return Output_Map[i_Map][iOutput].first;} - size_t GetANNOutputIndex(size_t i_Map, size_t iOutput){return Output_Map[i_Map][iOutput].second;} - size_t GetNMappedOutputs(size_t i_Map){return Output_Map[i_Map].size();} - - vector> GetOutputMapping(size_t i_map){return Output_Map[i_map];} - vector> GetInputMapping(size_t i_map){return Input_Map[i_map];} + 
std::vector MLP_indices; /*!< Loaded MLP index */ + + std::vector>> + Input_Map, /*!< Mapping of call variable inputs to matching MLP inputs */ + Output_Map; /*!< Mapping of call variable outputs to matching MLP outputs */ + + std::vector> input_indices; /*!< */ - vector GetANN_Inputs(size_t i_Map, vector&inputs){ - vector ANN_input; - ANN_input.resize(Input_Map[i_Map].size()); + public: - for(size_t iInput=0; iInput GetANN_Outputs(size_t i_Map, vector&outputs){ - vector ANN_output; - ANN_output.resize(Output_Map[i_Map].size()); + /*! + * \brief CIOMap class constructor; + * \param[in] MLP_collection - Pointer to CLookUp_ANN class for which the input-output amap is to be created + * \param[in] inputs - Input names for the call function. These should match with at least one of the MLP inputs. + * \param[in] outputs - Output names for the call function. These should match with at least one of the MLP outputs. + */ + CIOMap(CLookUp_ANN*MLP_collection, std::vector &inputs, std::vector &outputs); + + /*! + * \brief Set MLP index in IO map + * \param[in] i_MLP - loaded MLP index + */ + void Push_MLP_index(std::size_t i_MLP){MLP_indices.push_back(i_MLP);}; + + /*! + * \brief Pair call inputs and outputs with the inputs and outputs of + the loaded MLPs + * \param[in] MLP_collection - pointer to MLP collection class + * \param[in] inputs - vector with call input variable names + * \param[in] outputs - vector with call output variable names + */ + void PairVariableswithMLPs(CLookUp_ANN * MLP_collection, std::vector &inputs, std::vector &outputs); + + /*! + * \brief Get the number of MLPs in the current IO map + * \return number of MLPs with matching inputs and output(s) + */ + std::size_t GetNMLPs() const {return MLP_indices.size();} + + /*! + * \brief Get the loaded MLP index + * \return MLP index + */ + std::size_t GetMLPIndex(std::size_t i_Map) const {return MLP_indices[i_Map];} + + /*! 
+ * \brief Get the call input variable index + * \param[in] i_Map - input-output mapping index of the IO map + * \param[in] i_Input - input index of the call input variable + * \return MLP input variable index + */ + std::size_t GetInputIndex(std::size_t i_Map, std::size_t i_Input) const {return Input_Map[i_Map][i_Input].first;} + + /*! + * \brief Get the call output variable index + * \param[in] i_Map - input-output mapping index of the IO map + * \param[in] i_Output - output index of the call input variable + * \return call variable output index + */ + std::size_t GetOutputIndex(std::size_t i_Map, std::size_t i_Output) const {return Output_Map[i_Map][i_Output].first;} + + /*! + * \brief Get the MLP output variable index + * \param[in] i_Map - input-output mapping index of the IO map + * \param[in] i_Output - output index of the call input variable + * \return MLP output variable index + */ + std::size_t GetMLPOutputIndex(std::size_t i_Map, std::size_t i_Output) const {return Output_Map[i_Map][i_Output].second;} + + /*! + * \brief Get the number of matching output variables between the call and MLP outputs + * \param[in] i_Map - input-output mapping index of the IO map + * \return Number of matching variables between the loaded MLP and call variables + */ + std::size_t GetNMappedOutputs(std::size_t i_Map) const {return Output_Map[i_Map].size();} + + /*! + * \brief Get the mapping of MLP outputs matching to call outputs + * \param[in] i_Map - input-output mapping index of the IO map + * \return Mapping of MLP output variables to call variables + */ + std::vector> GetOutputMapping(std::size_t i_map) const {return Output_Map[i_map];} + + /*! + * \brief Get the mapping of MLP inputs to call inputs + * \param[in] i_Map - input-output mapping index of the IO map + * \return Mapping of MLP input variables to call inputs + */ + std::vector> GetInputMapping(std::size_t i_map) const{return Input_Map[i_map];} + + /*! 
+ * \brief Get the mapped inputs for the MLP at i_Map + * \param[in] i_Map - input-output mapping index of the IO map + * \param[in] inputs - call inputs + * \return Vector with call inputs in the correct order of the loaded MLP + */ + std::vector GetMLP_Inputs(std::size_t i_Map, std::vector&inputs) const { + std::vector MLP_input; + MLP_input.resize(Input_Map[i_Map].size()); - for(size_t iOutput=0; iOutput NeuralNetworks; /*!< Vector containing all loaded neural networks. */ + std::vector NeuralNetworks; /*!< std::vector containing all loaded neural networks. */ - vector ANN_filenames; /*!< Filenames containing ANN architecture information. */ + std::vector ANN_filenames; /*!< Filenames containing ANN architecture information. */ unsigned long number_of_variables; /*!< Number of loaded ANNs. */ @@ -59,20 +67,20 @@ class CLookUp_ANN * \param[in] flag - line to search for * \return line in file. */ - string SkipToFlag(ifstream *file_stream, string flag); + std::string SkipToFlag(ifstream *file_stream, std::string flag); /*! * \brief Load ANN architecture * \param[in] ANN - pointer to target NeuralNetwork class * \param[in] filename - filename containing ANN architecture information */ - void GenerateANN(CNeuralNetwork*ANN, string filename); + void GenerateANN(CNeuralNetwork*ANN, std::string filename); /*! * \brief Read ANN architecture input file * \param[in] filename - filename containing ANN architecture information */ - void ReadANNInputFile(string fileName); + void ReadANNInputFile(std::string fileName); public: @@ -80,7 +88,7 @@ class CLookUp_ANN * \brief ANN collection class constructor * \param[in] filename - filename containing list of ANN input files */ - CLookUp_ANN(string fileName); + CLookUp_ANN(std::string fileName); /*! 
* \brief Evaluate loaded ANNs for given inputs and outputs @@ -88,9 +96,9 @@ class CLookUp_ANN * \param[in] inputs - input values * \param[in] outputs - pointers to output variables */ - void Predict_ANN(CIOMap *input_output_map, vector &inputs, vector &outputs); + void Predict_ANN(CIOMap *input_output_map, std::vector &inputs, std::vector &outputs); - ~CLookUp_ANN(){for(size_t i_ANN=0; i_ANN &output_names, CIOMap *input_output_map) const; + bool Check_Duplicate_Outputs(std::vector &output_names, CIOMap *input_output_map) const; /*! * \brief Check if all output variables are present in the loaded ANNs * \param[in] output_names - output variable names to check * \param[in] input_output_map - pointer to input-output map to be checked */ - bool Check_Use_of_Outputs(vector &output_names, CIOMap *input_output_map) const; + bool Check_Use_of_Outputs(std::vector &output_names, CIOMap *input_output_map) const; /*! * \brief Check if all input variables are present in the loaded ANNs * \param[in] input_names - input variable names to check * \param[in] input_output_map - pointer to input-output map to be checked */ - bool Check_Use_of_Inputs(vector &input_names, CIOMap *input_output_map) const; + bool Check_Use_of_Inputs(std::vector &input_names, CIOMap *input_output_map) const; /*! 
* \brief Map variable names to ANN inputs or outputs @@ -127,7 +135,7 @@ class CLookUp_ANN * \param[in] variable_names - variable names to map to ANN inputs or outputs * \param[in] input - map to inputs (true) or outputs (false) */ - vector> FindVariable_Indices(size_t i_ANN, vector variable_names, bool input) const; + std::vector> FindVariable_Indices(std::size_t i_ANN, std::vector variable_names, bool input) const; }; diff --git a/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp index 6e54cd363a86..a7d1cfba5e43 100644 --- a/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp @@ -37,12 +37,11 @@ #include "../../linear_algebra/blas_structure.hpp" #include "CLayer.hpp" -using namespace std; namespace MLPToolbox{ class CNeuralNetwork { private: - vector input_names, + std::vector input_names, output_names; unsigned long n_hidden_layers; @@ -50,16 +49,16 @@ class CNeuralNetwork CLayer *inputLayer, *outputLayer; - vector hiddenLayers, + std::vector hiddenLayers, total_layers; - vector>> weights; - vector weights_mat; - vector> values; + //std::vector>> weights; + std::vector weights_mat; + std::vector> values; - vector> input_norm, + std::vector> input_norm, output_norm; - vector last_inputs; + std::vector last_inputs; su2double* ANN_outputs; @@ -68,14 +67,19 @@ class CNeuralNetwork LINEAR=1, RELU=2, SMOOTH_SLOPE=3, - ELU = 4 + ELU = 4, + GELU = 5, + SELU = 6, + SIGMOID = 7, + SWISH = 8, + TANH = 9 }; ENUM_ACTIVATION_FUNCTION * activation_function_types; public: CNeuralNetwork(); ~CNeuralNetwork(){ - for(size_t i=0; isetBias(i_neuron, value);} - void setActivationFunction(unsigned long i_layer, string input); + void setActivationFunction(unsigned long i_layer, std::string input); void displayNetwork(); void sizeWeights(); void sizeInputs(unsigned long n_inputs){last_inputs.resize(n_inputs); 
for(unsigned long iInput=0; iInputgetNNeurons();} - unsigned long getNNeurons(unsigned long iLayer, unsigned long iNeuron){return weights.at(iLayer).at(iNeuron).size();} + //unsigned long getNNeurons(unsigned long iLayer, unsigned long iNeuron){return weights.at(iLayer).at(iNeuron).size();} - void predict(vector &inputs); + void predict(std::vector &inputs); void SetInputNorm(unsigned long iInput, su2double input_min, su2double input_max){input_norm.at(iInput) = make_pair(input_min, input_max);} void SetOutputNorm(unsigned long iOutput, su2double output_min, su2double output_max){output_norm.at(iOutput) = make_pair(output_min, output_max);} - void PushOutputName(string input){output_names.push_back(input);} - void PushInputName(string input){input_names.push_back(input);} + void PushOutputName(std::string input){output_names.push_back(input);} + void PushInputName(std::string input){input_names.push_back(input);} - string GetInputName(size_t iInput){return input_names[iInput];} - string GetOutputName(size_t iOutput){return output_names[iOutput];} + std::string GetInputName(std::size_t iInput){return input_names[iInput];} + std::string GetOutputName(std::size_t iOutput){return output_names[iOutput];} - size_t GetnInputs(){return input_names.size();} - size_t GetnOutputs(){return output_names.size();} + std::size_t GetnInputs(){return input_names.size();} + std::size_t GetnOutputs(){return output_names.size();} - su2double GetANN_Output(size_t iOutput){return ANN_outputs[iOutput];} + su2double GetANN_Output(std::size_t iOutput){return ANN_outputs[iOutput];} void SizeActivationFunctions(unsigned long n_layers){activation_function_types = new ENUM_ACTIVATION_FUNCTION[n_layers]; } - su2double ComputeX(size_t iLayer, size_t iNeuron){ + su2double ComputeX(std::size_t iLayer, std::size_t iNeuron){ su2double x; x = total_layers[iLayer]->getBias(iNeuron); - size_t nNeurons_previous = total_layers[iLayer - 1]->getNNeurons(); - for(size_t jNeuron=0; jNeurongetNNeurons(); + 
for(std::size_t jNeuron=0; jNeurongetOutput(jNeuron); } return x; diff --git a/Common/include/toolboxes/multilayer_perceptron/CNeuron.hpp b/Common/include/toolboxes/multilayer_perceptron/CNeuron.hpp index fed140d4457e..3e1cefd1d6f9 100644 --- a/Common/include/toolboxes/multilayer_perceptron/CNeuron.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CNeuron.hpp @@ -32,9 +32,8 @@ #include #include "../../CConfig.hpp" -#include "../../linear_algebra/blas_structure.hpp" +//#include "../../linear_algebra/blas_structure.hpp" -using namespace std; namespace MLPToolbox{ class CNeuron { diff --git a/Common/include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp b/Common/include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp index d471e42d9399..7921023762bf 100644 --- a/Common/include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CReadNeuralNetwork.hpp @@ -30,46 +30,45 @@ #include #include #include - +#include #include "../../CConfig.hpp" -#include "../../linear_algebra/blas_structure.hpp" -using namespace std; namespace MLPToolbox{ + class CReadNeuralNetwork { private: - vector input_names; - vector output_names; + std::vector input_names; + std::vector output_names; - string filename; + std::string filename; unsigned long n_layers; - vector n_neurons; - vector>> weights; - vector> biases; - vector activation_functions; + std::vector n_neurons; + std::vector>> weights; + std::vector> biases; + std::vector activation_functions; - vector> input_norm; - vector> output_norm; + std::vector> input_norm; + std::vector> output_norm; public: - CReadNeuralNetwork(string filename_in); + CReadNeuralNetwork(std::string filename_in); void ReadMLPFile(); - string SkipToFlag(ifstream *file_stream, string flag); + std::string SkipToFlag(std::ifstream *file_stream, std::string flag); unsigned long GetNInputs(){return n_neurons.at(0);} unsigned long GetNOutputs(){return n_neurons.at(n_layers - 1);} unsigned 
long GetNlayers(){return n_layers;} - unsigned long GetNneurons(size_t iLayer){return n_neurons.at(iLayer);} - double long GetWeight(size_t iLayer, size_t iNeuron, size_t jNeuron){return weights.at(iLayer).at(iNeuron).at(jNeuron);} - double long GetBias(size_t iLayer, size_t iNeuron){return biases.at(iLayer).at(iNeuron);} - pair GetInputNorm(size_t iInput){return input_norm.at(iInput);} - pair GetOutputNorm(size_t iOutput){return output_norm.at(iOutput);} - string GetActivationFunction(size_t iLayer){return activation_functions.at(iLayer);} + unsigned long GetNneurons(std::size_t iLayer){return n_neurons.at(iLayer);} + su2double GetWeight(std::size_t iLayer, std::size_t iNeuron, std::size_t jNeuron){return weights.at(iLayer).at(iNeuron).at(jNeuron);} + su2double GetBias(std::size_t iLayer, std::size_t iNeuron){return biases.at(iLayer).at(iNeuron);} + std::pair GetInputNorm(std::size_t iInput){return input_norm.at(iInput);} + std::pair GetOutputNorm(std::size_t iOutput){return output_norm.at(iOutput);} + std::string GetActivationFunction(std::size_t iLayer){return activation_functions.at(iLayer);} - string GetInputName(size_t iInput){return input_names.at(iInput);} - string GetOutputName(size_t iOutput){return output_names.at(iOutput);} + std::string GetInputName(std::size_t iInput){return input_names.at(iInput);} + std::string GetOutputName(std::size_t iOutput){return output_names.at(iOutput);} ~CReadNeuralNetwork(){}; }; diff --git a/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp b/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp index d6bba0ce8b25..789988af101c 100644 --- a/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp @@ -32,25 +32,48 @@ #include #include -MLPToolbox::CIOMap::CIOMap(CLookUp_ANN*ANN_collection, vector &inputs, vector &outputs){ - PairVariableswithANNs(ANN_collection, inputs, outputs); +MLPToolbox::CIOMap::CIOMap(CLookUp_ANN*MLP_collection, vector &inputs, vector 
&outputs){ + /* Generate an input-output map given a set of call inputs and call outputs. These inputs are + mapped to the inputs of the loaded MLPs in the MLP_collection object. The call outputs are + then matched to the MLPs with matching inputs. + */ + PairVariableswithMLPs(MLP_collection, inputs, outputs); + + // Perform checks on input-output validity if(outputs.size() > 0){ - ANN_collection->Check_Use_of_Inputs(inputs, this); - ANN_collection->Check_Use_of_Outputs(outputs, this); - ANN_collection->Check_Duplicate_Outputs(outputs, this); + // Check wether all call inputs are used + MLP_collection->Check_Use_of_Inputs(inputs, this); + + // Check wether all call outputs are present in the MLP collection + MLP_collection->Check_Use_of_Outputs(outputs, this); + + // Check wether there are any duplicates in the MLP outputs + MLP_collection->Check_Duplicate_Outputs(outputs, this); } } -void MLPToolbox::CIOMap::PairVariableswithANNs(CLookUp_ANN*ANN_collection, vector &inputs, vector &outputs){ - +void MLPToolbox::CIOMap::PairVariableswithMLPs(CLookUp_ANN*MLP_collection, vector &inputs, vector &outputs){ + /* + In this function, the call inputs and outputs are matched to those within the MLP collection. 
+ */ bool isInput, isOutput; - for(size_t i_ANN=0; i_ANNGetNANNs(); i_ANN++){ - vector> Input_Indices = ANN_collection->FindVariable_Indices(i_ANN, inputs, true); + + // Looping over the loaded MLPs to check wether the MLP inputs match with the call inputs + for(size_t i_MLP=0; i_MLPGetNANNs(); i_MLP++){ + + // Mapped call inputs to MLP inputs + vector> Input_Indices = MLP_collection->FindVariable_Indices(i_MLP, inputs, true); isInput = Input_Indices.size() > 0; + if(isInput){ - vector> Output_Indices = ANN_collection->FindVariable_Indices(i_ANN, outputs, false); + // Only when the MLP inputs match with a portion of the call inputs are the output variable checks performed + + vector> Output_Indices = MLP_collection->FindVariable_Indices(i_MLP, outputs, false); isOutput = Output_Indices.size() > 0; + if(isOutput){ - ANN_indices.push_back(i_ANN); + + // Update input and output mapping if both inputs and outputs match + MLP_indices.push_back(i_MLP); Input_Map.push_back(Input_Indices); Output_Map.push_back(Output_Indices); } diff --git a/Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp b/Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp index f63e57038a7b..4642a8a9bd41 100644 --- a/Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CLookUp_ANN.cpp @@ -73,12 +73,12 @@ vector> MLPToolbox::CLookUp_ANN::FindVariable_Indices(size_ } void MLPToolbox::CLookUp_ANN::Predict_ANN(CIOMap *input_output_map, vector& inputs, vector& outputs){ - for(size_t i_map=0; i_mapGetNANNs(); i_map++){ - size_t i_ANN = input_output_map->GetANNIndex(i_map); - vector ANN_inputs = input_output_map->GetANN_Inputs(i_map, inputs); + for(size_t i_map=0; i_mapGetNMLPs(); i_map++){ + size_t i_ANN = input_output_map->GetMLPIndex(i_map); + vector ANN_inputs = input_output_map->GetMLP_Inputs(i_map, inputs); NeuralNetworks[i_ANN]->predict(ANN_inputs); for(size_t i=0; i < input_output_map->GetNMappedOutputs(i_map); i++){ - 
*outputs[input_output_map->GetOutputIndex(i_map, i)] = NeuralNetworks[i_ANN]->GetANN_Output(input_output_map->GetANNOutputIndex(i_map, i)); + *outputs[input_output_map->GetOutputIndex(i_map, i)] = NeuralNetworks[i_ANN]->GetANN_Output(input_output_map->GetMLPOutputIndex(i_map, i)); } } } @@ -111,7 +111,7 @@ void MLPToolbox::CLookUp_ANN::GenerateANN(CNeuralNetwork * ANN, string fileName) for(size_t i_layer = 0; i_layer < ANN->getNWeightLayers(); i_layer++){ ANN->setActivationFunction(i_layer, Reader.GetActivationFunction(i_layer)); for(size_t i_neuron=0; i_neuron < ANN->getNNeurons(i_layer); i_neuron++){ - for(size_t j_neuron=0; j_neurongetNNeurons(i_layer, i_neuron); j_neuron++){ + for(size_t j_neuron=0; j_neurongetNNeurons(i_layer+1); j_neuron++){ ANN->setWeight(i_layer, i_neuron, j_neuron, Reader.GetWeight(i_layer, i_neuron, j_neuron)); } } @@ -175,7 +175,7 @@ string MLPToolbox::CLookUp_ANN::SkipToFlag(ifstream *file_stream, string flag) { } if ((*file_stream).eof()) - SU2_MPI::Error("Flag not found in file", CURRENT_FUNCTION); + SU2_MPI::Error("Flag " + flag + " not found in file", CURRENT_FUNCTION); return line; } @@ -187,7 +187,7 @@ bool MLPToolbox::CLookUp_ANN::Check_Duplicate_Outputs(vector &output_nam vector duplicate_variables; for(size_t i_Output =0; i_Output < output_names.size(); i_Output++){ n_occurances = 0; - for(size_t i_map=0; i_mapGetNANNs(); i_map++){ + for(size_t i_map=0; i_mapGetNMLPs(); i_map++){ vector> output_map = input_output_map->GetOutputMapping(i_map); for(size_t j_Output=0; j_Output missing_inputs; bool inputs_are_present{true}; for(size_t iInput=0; iInputGetNANNs(); i_map++){ + for(size_t i_map=0; i_map < input_output_map->GetNMLPs(); i_map++){ vector> input_map = input_output_map->GetInputMapping(i_map); for(size_t jInput=0; jInput &output_names, bool found_output{false}; /* Looping over all the selected ANNs */ - for(size_t i_map=0; i_map < input_output_map->GetNANNs(); i_map++){ + for(size_t i_map=0; i_map < 
input_output_map->GetNMLPs(); i_map++){ vector> output_map = input_output_map->GetOutputMapping(i_map); /* Looping over the outputs of the output map of the current ANN */ diff --git a/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp b/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp index 496ac851048f..ebc712e15e52 100644 --- a/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CNeuralNetwork.cpp @@ -42,6 +42,8 @@ void MLPToolbox::CNeuralNetwork::predict(vector &inputs){ inputLayer->setOutput(iNeuron, x_norm); } if(!same_point){ + su2double alpha=1.67326324; + su2double lambda=1.05070098; for(iLayer=1; iLayergetNNeurons(); @@ -54,14 +56,22 @@ void MLPToolbox::CNeuralNetwork::predict(vector &inputs){ case ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE: for(iNeuron=0; iNeurongetInput(iNeuron); - y = x > 0 ? x : tanh(x); + if(x > 0){ + y = x; + }else{ + y = tanh(x); + } total_layers[iLayer]->setOutput(iNeuron, y); } break; case ENUM_ACTIVATION_FUNCTION::ELU: for(iNeuron=0; iNeurongetInput(iNeuron); - y = x > 0 ? x : (exp(x) - 1); + if(x > 0){ + y = x; + }else{ + y = exp(x) - 1; + } total_layers[iLayer]->setOutput(iNeuron, y); } break; @@ -75,7 +85,52 @@ void MLPToolbox::CNeuralNetwork::predict(vector &inputs){ case ENUM_ACTIVATION_FUNCTION::RELU: for(iNeuron=0; iNeurongetInput(iNeuron); - y = x > 0.0 ? 
x : 0.0; + if(x > 0){ + y = x; + }else{ + y = 0.0; + } + total_layers[iLayer]->setOutput(iNeuron, y); + } + break; + case ENUM_ACTIVATION_FUNCTION::SWISH: + for(iNeuron=0; iNeurongetInput(iNeuron); + y = x / (1 + exp(-x)); + total_layers[iLayer]->setOutput(iNeuron, y); + } + break; + case ENUM_ACTIVATION_FUNCTION::TANH: + for(iNeuron=0; iNeurongetInput(iNeuron); + y = tanh(x); + total_layers[iLayer]->setOutput(iNeuron, y); + } + break; + case ENUM_ACTIVATION_FUNCTION::SIGMOID: + for(iNeuron=0; iNeurongetInput(iNeuron); + y = 1.0/(1 + exp(-x)); + total_layers[iLayer]->setOutput(iNeuron, y); + } + break; + case ENUM_ACTIVATION_FUNCTION::SELU: + for(iNeuron=0; iNeurongetInput(iNeuron); + if(x > 0.0){ + y = lambda * x; + }else{ + y = lambda * alpha * (exp(x) - 1); + } + total_layers[iLayer]->setOutput(iNeuron, y); + } + break; + case ENUM_ACTIVATION_FUNCTION::GELU: + + for(iNeuron=0; iNeurongetInput(iNeuron); + + y = 0.5 * x * (1 + tanh(sqrt(2 / PI_NUMBER) * (x + 0.044715 * pow(x, 3)))); total_layers[iLayer]->setOutput(iNeuron, y); } break; @@ -108,45 +163,24 @@ MLPToolbox::CNeuralNetwork::CNeuralNetwork(){ void MLPToolbox::CNeuralNetwork::defineInputLayer(unsigned long n_neurons){ - //cout << "Creating an input layer with " << n_neurons << " neurons. "<< endl; inputLayer = new CLayer(n_neurons); inputLayer->setInput(true); input_norm.resize(n_neurons); } void MLPToolbox::CNeuralNetwork::defineOutputLayer(unsigned long n_neurons){ - //cout << "Creating an output layer with " << n_neurons << " neurons. "<< endl; outputLayer = new CLayer(n_neurons); output_norm.resize(n_neurons); } void MLPToolbox::CNeuralNetwork::push_hidden_layer(unsigned long n_neurons){ CLayer *newLayer = new CLayer(n_neurons); - //cout << "Creating a hidden layer with " << n_neurons << " neurons. 
"<< endl; hiddenLayers.push_back(newLayer); n_hidden_layers ++; } void MLPToolbox::CNeuralNetwork::sizeWeights(){ - unsigned long i_layer{0}; - weights.resize(n_hidden_layers + 1); - weights.at(i_layer).resize(inputLayer->getNNeurons()); - CLayer * previouslayer = inputLayer; - - if(n_hidden_layers != 0){ - for(size_t i_hidden_layer=0; i_hidden_layer < n_hidden_layers; i_hidden_layer++){ - for(size_t i_neuron=0; i_neuron < previouslayer->getNNeurons(); i_neuron++){ - weights.at(i_layer).at(i_neuron).resize(hiddenLayers.at(i_hidden_layer)->getNNeurons()); - } - previouslayer = hiddenLayers.at(i_hidden_layer); - i_layer ++; - weights.at(i_layer).resize(previouslayer->getNNeurons()); - } - } - for(size_t i_neuron=0; i_neuron < previouslayer->getNNeurons(); i_neuron++){ - weights.at(i_layer).at(i_neuron).resize(outputLayer->getNNeurons()); - } - + total_layers.resize(n_hidden_layers + 2); total_layers.at(0) = inputLayer; for(size_t iLayer=0; iLayergetNNeurons() << " neurons, Activation Function: " << inputLayer->getActivationType() << endl; - for(size_t i=0; igetNNeurons(); i++){ - for(size_t j=0; jgetNNeurons(); j++){ - cout << weights_mat[0][i][j] << " "; - } - } - for(size_t i_layer=0; i_layer < hiddenLayers.size(); i_layer++){ - cout << "Hidden layer " << i_layer + 1 << ": " << hiddenLayers.at(i_layer)->getNNeurons() << " neurons, Activation Function: " << hiddenLayers.at(i_layer) ->getActivationType() << endl; - for(size_t i=0; igetNNeurons() <<" neurons, Activation Function: " << outputLayer->getActivationType() << endl; + /*! 
+ TODO: + find way of displaying network architectures + */ } void MLPToolbox::CNeuralNetwork::setActivationFunction(unsigned long i_layer, string input) { if(input.compare("linear") == 0){ - //activation_functions[i_layer] = new Linear(); activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::LINEAR; return; } if(input.compare("relu") == 0){ - //activation_functions[i_layer] = new Relu(); activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::RELU; return; } if((input.compare("smooth_slope") == 0)){ - //activation_functions[i_layer] = new SmoothSlope(); activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::SMOOTH_SLOPE; return; } if((input.compare("elu") == 0)){ - //activation_functions[i_layer] = new SmoothSlope(); activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::ELU; return; } + if(input.compare("swish") == 0){ + activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::SWISH; + return; + } + if(input.compare("sigmoid") == 0){ + activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::SIGMOID; + return; + } + if(input.compare("tanh") == 0){ + activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::TANH; + return; + } + if(input.compare("selu") == 0){ + activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::SELU; + return; + } + if(input.compare("gelu") == 0){ + activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::GELU; + return; + } if(input.compare("none") == 0){ - //activation_functions[i_layer] = new None(); activation_function_types[i_layer] = ENUM_ACTIVATION_FUNCTION::NONE; return; } - //total_layers.at(i_layer)->setActivationFunction(input); + SU2_MPI::Error("Unknown activation function type: " + input, CURRENT_FUNCTION); return; } diff --git a/SU2_CFD/src/fluid/CPengRobinson.cpp b/SU2_CFD/src/fluid/CPengRobinson.cpp index 66abe62f4f4c..962e8a60ea16 100644 --- a/SU2_CFD/src/fluid/CPengRobinson.cpp +++ b/SU2_CFD/src/fluid/CPengRobinson.cpp @@ -26,6 +26,7 @@ */ #include 
"../../include/fluid/CPengRobinson.hpp" +#include "../../../Common/include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp" CPengRobinson::CPengRobinson(su2double gamma, su2double R, su2double Pstar, su2double Tstar, su2double w) : CIdealGas(gamma, R) { @@ -38,6 +39,56 @@ CPengRobinson::CPengRobinson(su2double gamma, su2double R, su2double Pstar, su2d k = 0.37464 + 1.54226 * w - 0.26992 * w * w; else k = 0.379642 + 1.48503 * w - 0.164423 * w * w + 0.016666 * w * w * w; + + vector call_input_variables; + vector call_output_variables; + vector call_inputs; + vector call_outputs; + call_input_variables.push_back("Progress_Variable"); + call_inputs.push_back(-5.74934292655689e-01); + call_input_variables.push_back("Enthalpy"); + call_inputs.push_back(+2.22692201641688e+03); + call_input_variables.push_back("Mixture_Fraction"); + call_inputs.push_back(0.0144045); + + call_output_variables.push_back("Temperature"); + call_outputs.push_back(&Temperature); + call_output_variables.push_back("Density"); + call_outputs.push_back(&Density); + + vector call_inputvars_lean; + call_inputvars_lean.push_back("Progress_Variable_lean"); + call_inputvars_lean.push_back("Enthalpy_lean"); + call_inputvars_lean.push_back("Mixture_Fraction_lean"); + + vector call_inputvars_rich; + call_inputvars_rich.push_back("Progress_Variable_rich"); + call_inputvars_rich.push_back("Enthalpy_rich"); + call_inputvars_rich.push_back("Mixture_Fraction_rich"); + + MLPToolbox::CLookUp_ANN * test_mlp = new MLPToolbox::CLookUp_ANN("MLP_collection.mlp"); + MLPToolbox::CIOMap * test_map = new MLPToolbox::CIOMap(test_mlp, call_input_variables, call_output_variables); + MLPToolbox::CIOMap * lean_map = new MLPToolbox::CIOMap(test_mlp, call_inputvars_lean, call_output_variables); + MLPToolbox::CIOMap * rich_map = new MLPToolbox::CIOMap(test_mlp, call_inputvars_rich, call_output_variables); + + + test_mlp->Predict_ANN(test_map, call_inputs, call_outputs); + cout << "in flame zone: " << Density << " " << Temperature 
<< endl; + call_inputs[0] = -7.36; + call_inputs[1] = 26468.5; + call_inputs[2] = 1.0; + test_mlp->Predict_ANN(rich_map, call_inputs, call_outputs); + cout << "rich: " << Density << " " << Temperature << endl; + call_inputs[0] = -0.4753; + call_inputs[1] = 3144.6; + call_inputs[2] = 0.0; + test_mlp->Predict_ANN(lean_map, call_inputs, call_outputs); + cout << "lean: " << Density << " " << Temperature << endl; + + delete test_mlp; + delete test_map; + delete rich_map; + delete lean_map; } su2double CPengRobinson::alpha2(su2double T) const { From 04813b107392ff542fc4fb7577e30bd2a54bf7e5 Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Wed, 19 Oct 2022 14:43:54 +0200 Subject: [PATCH 08/84] Added a MLP fluid model template class --- Common/include/CConfig.hpp | 7 ++ Common/include/option_structure.hpp | 2 + Common/src/CConfig.cpp | 3 + .../multilayer_perceptron/CIOMap.cpp | 2 +- .../CReadNeuralNetwork.cpp | 3 +- .../{CMLPGas_Template.hpp => CMLPGas.hpp} | 13 +-- SU2_CFD/obj/Makefile.am | 2 +- .../{CMLPGas_Template.cpp => CMLPGas.cpp} | 95 ++++++++++++------- SU2_CFD/src/meson.build | 2 +- SU2_CFD/src/solvers/CEulerSolver.cpp | 12 +++ SU2_CFD/src/solvers/CIncEulerSolver.cpp | 13 +++ 11 files changed, 111 insertions(+), 43 deletions(-) rename SU2_CFD/include/fluid/{CMLPGas_Template.hpp => CMLPGas.hpp} (91%) rename SU2_CFD/src/fluid/{CMLPGas_Template.cpp => CMLPGas.cpp} (54%) diff --git a/Common/include/CConfig.hpp b/Common/include/CConfig.hpp index 9e47dc85e567..db4410287a85 100644 --- a/Common/include/CConfig.hpp +++ b/Common/include/CConfig.hpp @@ -223,6 +223,7 @@ class CConfig { string Inlet_Filename; /*!< \brief Filename specifying an inlet profile. */ su2double Inlet_Matching_Tol; /*!< \brief Tolerance used when matching a point to a point from the inlet file. */ string ActDisk_FileName; /*!< \brief Filename specifying an actuator disk. */ + string MLP_filename; /*!< \brief Filename specifying an MLP-driven fluid model. 
*/ string *Marker_Euler, /*!< \brief Euler wall markers. */ *Marker_FarField, /*!< \brief Far field markers. */ @@ -4842,6 +4843,12 @@ class CConfig { */ string GetActDisk_FileName(void) const { return ActDisk_FileName; } + /*! + * \brief Get name of the input file for the fluid model multi-layer perceptron collection. + * \return Name of the input file for the specified MLP collection file. + */ + string GetMLP_FileName(void) const { return MLP_filename; } + /*! * \brief Get the tolerance used for matching two points on a specified inlet * \return Tolerance used for matching a point to a specified inlet diff --git a/Common/include/option_structure.hpp b/Common/include/option_structure.hpp index 0b97712612ad..45d475fbbe47 100644 --- a/Common/include/option_structure.hpp +++ b/Common/include/option_structure.hpp @@ -569,6 +569,7 @@ enum ENUM_FLUIDMODEL { MUTATIONPP = 7, /*!< \brief Mutation++ gas model for nonequilibrium flow. */ SU2_NONEQ = 8, /*!< \brief User defined gas model for nonequilibrium flow. */ FLUID_MIXTURE = 9, /*!< \brief Species mixture model. */ + MLP_GAS = 10, /*!< \brief multi-layer perceptron driven fluid model. */ }; static const MapType FluidModel_Map = { MakePair("STANDARD_AIR", STANDARD_AIR) @@ -581,6 +582,7 @@ static const MapType FluidModel_Map = { MakePair("MUTATIONPP", MUTATIONPP) MakePair("SU2_NONEQ", SU2_NONEQ) MakePair("FLUID_MIXTURE", FLUID_MIXTURE) + MakePair("MLP_GAS", MLP_GAS) }; /*! 
diff --git a/Common/src/CConfig.cpp b/Common/src/CConfig.cpp index cc9ea692c76a..e6375f1a78de 100644 --- a/Common/src/CConfig.cpp +++ b/Common/src/CConfig.cpp @@ -1482,6 +1482,9 @@ void CConfig::SetConfig_Options() { /*!\brief ACTDISK_FILENAME \n DESCRIPTION: Input file for a specified actuator disk (w/ extension) \n DEFAULT: actdiskinput.dat \ingroup Config*/ addStringOption("ACTDISK_FILENAME", ActDisk_FileName, string("actdiskinput.dat")); + /*!\brief MLP_FILENAME \n DESCRIPTION: Input file for a multi-layer perceptron input file for fluid model definition. w/ extension) \n DEFAULT: MLP_collection.mlp \ingroup Config*/ + addStringOption("MLP_FILENAME", MLP_filename, string("MLP_collection.mlp")); + /*!\brief INLET_TYPE \n DESCRIPTION: Inlet boundary type \n OPTIONS: see \link Inlet_Map \endlink \n DEFAULT: TOTAL_CONDITIONS \ingroup Config*/ addEnumOption("INLET_TYPE", Kind_Inlet, Inlet_Map, INLET_TYPE::TOTAL_CONDITIONS); /*!\brief INC_INLET_TYPE \n DESCRIPTION: List of inlet types for incompressible flows. List length must match number of inlet markers. Options: VELOCITY_INLET, PRESSURE_INLET, INPUT_FILE. 
\ingroup Config*/ diff --git a/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp b/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp index 789988af101c..36d08a081db1 100644 --- a/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CIOMap.cpp @@ -48,7 +48,7 @@ MLPToolbox::CIOMap::CIOMap(CLookUp_ANN*MLP_collection, vector &inputs, v MLP_collection->Check_Use_of_Outputs(outputs, this); // Check wether there are any duplicates in the MLP outputs - MLP_collection->Check_Duplicate_Outputs(outputs, this); + //MLP_collection->Check_Duplicate_Outputs(outputs, this); } } void MLPToolbox::CIOMap::PairVariableswithMLPs(CLookUp_ANN*MLP_collection, vector &inputs, vector &outputs){ diff --git a/Common/src/toolboxes/multilayer_perceptron/CReadNeuralNetwork.cpp b/Common/src/toolboxes/multilayer_perceptron/CReadNeuralNetwork.cpp index 8c34674e2605..73e49fda1e14 100644 --- a/Common/src/toolboxes/multilayer_perceptron/CReadNeuralNetwork.cpp +++ b/Common/src/toolboxes/multilayer_perceptron/CReadNeuralNetwork.cpp @@ -30,7 +30,7 @@ using namespace std; MLPToolbox::CReadNeuralNetwork::CReadNeuralNetwork(string filename_in){ filename = filename_in; -}; +} void MLPToolbox::CReadNeuralNetwork::ReadMLPFile(){ ifstream file_stream; @@ -109,7 +109,6 @@ void MLPToolbox::CReadNeuralNetwork::ReadMLPFile(){ /* Read MLP input variable names */ if(line.compare("[input names]") == 0){ found_input_names = true; - unsigned short j{1}; getline(file_stream, line); while(line.compare("") != 0){ input_names.push_back(line); diff --git a/SU2_CFD/include/fluid/CMLPGas_Template.hpp b/SU2_CFD/include/fluid/CMLPGas.hpp similarity index 91% rename from SU2_CFD/include/fluid/CMLPGas_Template.hpp rename to SU2_CFD/include/fluid/CMLPGas.hpp index db3207c3313e..0da131105b43 100644 --- a/SU2_CFD/include/fluid/CMLPGas_Template.hpp +++ b/SU2_CFD/include/fluid/CMLPGas.hpp @@ -1,5 +1,5 @@ /*! 
- * \file CMLPGas_Template.hpp + * \file CMLPGas.hpp * \brief Defines a template fluid model class using multilayer perceptrons * for theromodynamic state definition * \author E.Bunschoten @@ -31,17 +31,18 @@ #include "CFluidModel.hpp" #include "../../../Common/include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp" /*! - * \class CMLPGas_Template + * \class CMLPGas * \brief Template class for fluid model definition using multi-layer perceptrons for * fluid dynamic state definition. * \author: E.Bunschoten. */ -class CMLPGas_Template : public CFluidModel { +class CMLPGas : public CFluidModel { protected: su2double Gamma{0.0}; /*!< \brief Ratio of Specific Heats. */ su2double Gamma_Minus_One{0.0}; /*!< \brief Ratio of Specific Heats Minus One. */ su2double Gas_Constant{0.0}; /*!< \brief Gas Constant. */ - bool ComputeEntropy{true}; /*!< \brief Whether or not to compute entropy. */ + + su2double R_u = 8.31451; string ann_input_filename; MLPToolbox::CLookUp_ANN * lookup_ann; @@ -69,9 +70,9 @@ class CMLPGas_Template : public CFluidModel { /*! * \brief Constructor of the class. */ - CMLPGas_Template(su2double gamma, su2double R, bool CompEntropy = true); + CMLPGas(const CConfig* config); - ~CMLPGas_Template(); + ~CMLPGas(); /*! * \brief Set the Dimensionless State using Density and Internal Energy * \param[in] rho - first thermodynamic variable. 
diff --git a/SU2_CFD/obj/Makefile.am b/SU2_CFD/obj/Makefile.am index 5ba91fd0d4e0..361d69d5af9b 100644 --- a/SU2_CFD/obj/Makefile.am +++ b/SU2_CFD/obj/Makefile.am @@ -57,7 +57,7 @@ libSU2Core_sources = ../src/definition_structure.cpp \ ../src/fluid/CNEMOGas.cpp \ ../src/fluid/CSU2TCLib.cpp \ ../src/fluid/CMutationTCLib.cpp \ - ../src/fluid/CMLPGas_Template.cpp \ + ../src/fluid/CMLPGas.cpp \ ../src/integration/CIntegration.cpp \ ../src/integration/CSingleGridIntegration.cpp \ ../src/integration/CMultiGridIntegration.cpp \ diff --git a/SU2_CFD/src/fluid/CMLPGas_Template.cpp b/SU2_CFD/src/fluid/CMLPGas.cpp similarity index 54% rename from SU2_CFD/src/fluid/CMLPGas_Template.cpp rename to SU2_CFD/src/fluid/CMLPGas.cpp index 44e00c5ec8de..219dc2351e34 100644 --- a/SU2_CFD/src/fluid/CMLPGas_Template.cpp +++ b/SU2_CFD/src/fluid/CMLPGas.cpp @@ -1,5 +1,5 @@ /*! - * \file CMLPGas_Template.cpp + * \file CMLPGas.cpp * \brief Source of the data-driven fluid model class using * multilayer perceptrons for data regression * \author E.Bunschoten @@ -26,35 +26,28 @@ * License along with SU2. If not, see . 
*/ -#include "../../include/fluid/CMLPGas_Template.hpp" +#include "../../include/fluid/CMLPGas.hpp" -CMLPGas_Template::CMLPGas_Template(su2double gamma, su2double R, bool CompEntropy) : CFluidModel() { - Gamma = gamma; - Gamma_Minus_One = Gamma - 1.0; - Gas_Constant = R; - Cp = Gamma / Gamma_Minus_One * Gas_Constant; - Cv = Cp - R; - - ComputeEntropy = CompEntropy; +CMLPGas::CMLPGas(const CConfig* config) : CFluidModel() { /* Define a CLookUp_ANN object to allow for data regression */ - ann_input_filename = "MLP_collection.mlp"; + ann_input_filename = config->GetMLP_FileName(); lookup_ann = new MLPToolbox::CLookUp_ANN(ann_input_filename); /* Map MLP inputs to outputs for each look-up operation*/ MapInputs_to_Outputs(); } -CMLPGas_Template::~CMLPGas_Template(){ +CMLPGas::~CMLPGas(){ delete iomap_rhoe; delete iomap_PT; delete iomap_Prho; delete lookup_ann; } -void CMLPGas_Template::MapInputs_to_Outputs(){ +void CMLPGas::MapInputs_to_Outputs(){ - input_names_rhoe.push_back("rho"); - input_names_rhoe.push_back("e"); + input_names_rhoe.push_back("Density"); + input_names_rhoe.push_back("Energy"); output_names_rhoe.push_back("Temperature"); outputs_rhoe.push_back(&Temperature); @@ -75,41 +68,79 @@ void CMLPGas_Template::MapInputs_to_Outputs(){ iomap_rhoe = new MLPToolbox::CIOMap(lookup_ann, input_names_rhoe, output_names_rhoe); - input_names_PT.push_back("P"); - input_names_PT.push_back("T"); + input_names_PT.push_back("Pressure"); + input_names_PT.push_back("Temperature"); - output_names_PT.push_back("rho"); + output_names_PT.push_back("Density"); outputs_PT.push_back(&Density); - output_names_PT.push_back("e"); + output_names_PT.push_back("Energy"); outputs_PT.push_back(&StaticEnergy); iomap_PT = new MLPToolbox::CIOMap(lookup_ann, input_names_PT, output_names_PT); - input_names_Prho.push_back("P"); - input_names_Prho.push_back("rho"); + input_names_Prho.push_back("Pressure"); + input_names_Prho.push_back("Density"); - output_names_Prho.push_back("e"); + 
output_names_Prho.push_back("Energy"); outputs_Prho.push_back(&StaticEnergy); iomap_Prho = new MLPToolbox::CIOMap(lookup_ann, input_names_Prho, output_names_Prho); } -void CMLPGas_Template::SetTDState_rhoe(su2double rho, su2double e) { +void CMLPGas::SetTDState_rhoe(su2double rho, su2double e) { + Density = rho; + StaticEnergy = e; vector ann_inputs; ann_inputs.push_back(rho); ann_inputs.push_back(e); lookup_ann->Predict_ANN(iomap_rhoe, ann_inputs, outputs_rhoe); -} + Cp = (Pressure / (R_u * Density * Temperature)) * 1 / (dTde_rho); + Cv = (1/dTde_rho); + Gamma = Cp / Cv; + Gamma_Minus_One = Gamma - 1; + Gas_Constant = Cp * Gamma_Minus_One; -void CMLPGas_Template::SetTDState_PT(su2double P, su2double T) { - vector ann_inputs; - ann_inputs.push_back(P); - ann_inputs.push_back(T); - lookup_ann->Predict_ANN(iomap_PT, ann_inputs, outputs_PT); +} - SetTDState_rhoe(Density, StaticEnergy); +void CMLPGas::SetTDState_PT(su2double P, su2double T) { + // vector ann_inputs; + // ann_inputs.push_back(P); + // ann_inputs.push_back(T); + // lookup_ann->Predict_ANN(iomap_PT, ann_inputs, outputs_PT); + + su2double rho = 0.5*(6.9682399336300005e-01 + 2.4896548789900002e+00); + su2double e = 0.5*(3.2562734877400001e+05 +4.8566089128400001e+05); + rho = 1.0; + e = 3.2562734877400001e+05; + su2double delta_P, delta_T, delta_rho, delta_e, tolerance_P = 10, tolerance_T = 1.0; + su2double determinant, relaxation = 0.1; + unsigned long iter = 0, iter_max = 1000; + bool converged= false; + cout << "Target PT: " << P << " " << T << endl; + while(!converged && (iter < iter_max)){ + SetTDState_rhoe(rho, e); + delta_P = (P - Pressure); + delta_T = (T - Temperature); + cout << delta_P << " " << delta_T << endl; + if((abs(delta_P) ann_inputs; ann_inputs.push_back(P); ann_inputs.push_back(rho); @@ -117,7 +148,7 @@ void CMLPGas_Template::SetTDState_Prho(su2double P, su2double rho) { SetTDState_rhoe(rho, StaticEnergy); } -void CMLPGas_Template::SetEnergy_Prho(su2double P, su2double rho) { +void 
CMLPGas::SetEnergy_Prho(su2double P, su2double rho) { vector ann_inputs; ann_inputs.push_back(P); ann_inputs.push_back(rho); diff --git a/SU2_CFD/src/meson.build b/SU2_CFD/src/meson.build index 9d81ec13f717..e477f80b2b65 100644 --- a/SU2_CFD/src/meson.build +++ b/SU2_CFD/src/meson.build @@ -11,7 +11,7 @@ su2_cfd_src += files(['fluid/CFluidModel.cpp', 'fluid/CNEMOGas.cpp', 'fluid/CMutationTCLib.cpp', 'fluid/CSU2TCLib.cpp', - 'fluid/CMLPGas_Template.cpp']) + 'fluid/CMLPGas.cpp']) su2_cfd_src += files(['output/COutputFactory.cpp', 'output/CAdjElasticityOutput.cpp', diff --git a/SU2_CFD/src/solvers/CEulerSolver.cpp b/SU2_CFD/src/solvers/CEulerSolver.cpp index 43dcd3de27c1..35338ebbfda9 100644 --- a/SU2_CFD/src/solvers/CEulerSolver.cpp +++ b/SU2_CFD/src/solvers/CEulerSolver.cpp @@ -32,6 +32,7 @@ #include "../../include/fluid/CIdealGas.hpp" #include "../../include/fluid/CVanDerWaalsGas.hpp" #include "../../include/fluid/CPengRobinson.hpp" +#include "../../include/fluid/CMLPGas.hpp" #include "../../include/numerics_simd/CNumericsSIMD.hpp" #include "../../include/limiters/CLimiterDetails.hpp" @@ -857,6 +858,12 @@ void CEulerSolver::SetNondimensionalization(CConfig *config, unsigned short iMes auxFluidModel = new CPengRobinson(Gamma, config->GetGas_Constant(), config->GetPressure_Critical(), config->GetTemperature_Critical(), config->GetAcentric_Factor()); break; + + case MLP_GAS: + + auxFluidModel = new CMLPGas(config); + + break; default: SU2_MPI::Error("Unknown fluid model.", CURRENT_FUNCTION); @@ -1070,6 +1077,11 @@ void CEulerSolver::SetNondimensionalization(CConfig *config, unsigned short iMes config->GetTemperature_Critical() / config->GetTemperature_Ref(), config->GetAcentric_Factor()); break; + + case MLP_GAS: + FluidModel[thread] = new CMLPGas(config); + + break; } GetFluidModel()->SetEnergy_Prho(Pressure_FreeStreamND, Density_FreeStreamND); diff --git a/SU2_CFD/src/solvers/CIncEulerSolver.cpp b/SU2_CFD/src/solvers/CIncEulerSolver.cpp index 
371055f31bb5..796144f085fe 100644 --- a/SU2_CFD/src/solvers/CIncEulerSolver.cpp +++ b/SU2_CFD/src/solvers/CIncEulerSolver.cpp @@ -30,6 +30,7 @@ #include "../../include/fluid/CConstantDensity.hpp" #include "../../include/fluid/CIncIdealGas.hpp" #include "../../include/fluid/CIncIdealGasPolynomial.hpp" +#include "../../include/fluid/CMLPGas.hpp" #include "../../include/variables/CIncNSVariable.hpp" #include "../../include/limiters/CLimiterDetails.hpp" #include "../../../Common/include/toolboxes/geometry_toolbox.hpp" @@ -330,6 +331,13 @@ void CIncEulerSolver::SetNondimensionalization(CConfig *config, unsigned short i config->SetPressure_Thermodynamic(Pressure_Thermodynamic); break; + case MLP_GAS: + auxFluidModel = new CMLPGas(config); + auxFluidModel->SetTDState_T(Temperature_FreeStream); + Pressure_Thermodynamic = auxFluidModel->GetPressure(); + config->SetPressure_Thermodynamic(Pressure_Thermodynamic); + break; + default: SU2_MPI::Error("Fluid model not implemented for incompressible solver.", CURRENT_FUNCTION); @@ -492,6 +500,11 @@ void CIncEulerSolver::SetNondimensionalization(CConfig *config, unsigned short i fluidModel->SetTDState_T(Temperature_FreeStreamND); break; + case MLP_GAS: + fluidModel = new CMLPGas(config); + fluidModel->SetTDState_T(Temperature_FreeStreamND); + break; + } if (viscous) { From e8a3a93e987c38e54e98b86c9f35d4830d3c90c9 Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Mon, 21 Nov 2022 11:34:58 +0100 Subject: [PATCH 09/84] Bug fix --- SU2_CFD/src/fluid/CPengRobinson.cpp | 50 ----------------------------- 1 file changed, 50 deletions(-) diff --git a/SU2_CFD/src/fluid/CPengRobinson.cpp b/SU2_CFD/src/fluid/CPengRobinson.cpp index 962e8a60ea16..2b5b09d611da 100644 --- a/SU2_CFD/src/fluid/CPengRobinson.cpp +++ b/SU2_CFD/src/fluid/CPengRobinson.cpp @@ -39,56 +39,6 @@ CPengRobinson::CPengRobinson(su2double gamma, su2double R, su2double Pstar, su2d k = 0.37464 + 1.54226 * w - 0.26992 * w * w; else k = 0.379642 + 1.48503 * w - 0.164423 * w * w 
+ 0.016666 * w * w * w; - - vector call_input_variables; - vector call_output_variables; - vector call_inputs; - vector call_outputs; - call_input_variables.push_back("Progress_Variable"); - call_inputs.push_back(-5.74934292655689e-01); - call_input_variables.push_back("Enthalpy"); - call_inputs.push_back(+2.22692201641688e+03); - call_input_variables.push_back("Mixture_Fraction"); - call_inputs.push_back(0.0144045); - - call_output_variables.push_back("Temperature"); - call_outputs.push_back(&Temperature); - call_output_variables.push_back("Density"); - call_outputs.push_back(&Density); - - vector call_inputvars_lean; - call_inputvars_lean.push_back("Progress_Variable_lean"); - call_inputvars_lean.push_back("Enthalpy_lean"); - call_inputvars_lean.push_back("Mixture_Fraction_lean"); - - vector call_inputvars_rich; - call_inputvars_rich.push_back("Progress_Variable_rich"); - call_inputvars_rich.push_back("Enthalpy_rich"); - call_inputvars_rich.push_back("Mixture_Fraction_rich"); - - MLPToolbox::CLookUp_ANN * test_mlp = new MLPToolbox::CLookUp_ANN("MLP_collection.mlp"); - MLPToolbox::CIOMap * test_map = new MLPToolbox::CIOMap(test_mlp, call_input_variables, call_output_variables); - MLPToolbox::CIOMap * lean_map = new MLPToolbox::CIOMap(test_mlp, call_inputvars_lean, call_output_variables); - MLPToolbox::CIOMap * rich_map = new MLPToolbox::CIOMap(test_mlp, call_inputvars_rich, call_output_variables); - - - test_mlp->Predict_ANN(test_map, call_inputs, call_outputs); - cout << "in flame zone: " << Density << " " << Temperature << endl; - call_inputs[0] = -7.36; - call_inputs[1] = 26468.5; - call_inputs[2] = 1.0; - test_mlp->Predict_ANN(rich_map, call_inputs, call_outputs); - cout << "rich: " << Density << " " << Temperature << endl; - call_inputs[0] = -0.4753; - call_inputs[1] = 3144.6; - call_inputs[2] = 0.0; - test_mlp->Predict_ANN(lean_map, call_inputs, call_outputs); - cout << "lean: " << Density << " " << Temperature << endl; - - delete test_mlp; - delete 
test_map; - delete rich_map; - delete lean_map; } su2double CPengRobinson::alpha2(su2double T) const { From 0a565343307aa6a5a7f012f2779cf5c02ad4e8b7 Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Mon, 21 Nov 2022 16:57:25 +0100 Subject: [PATCH 10/84] Allowed for LUT alongside MLP evaluation in datadriven fluid model --- .../multilayer_perceptron/CNeuralNetwork.hpp | 3 + SU2_CFD/include/fluid/CDataDrivenFluid.hpp | 75 +++++-- SU2_CFD/src/fluid/CDataDrivenFluid.cpp | 211 ++++++++++++++++-- 3 files changed, 246 insertions(+), 43 deletions(-) diff --git a/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp index a7d1cfba5e43..b58528069d8e 100644 --- a/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp @@ -83,6 +83,9 @@ class CNeuralNetwork delete total_layers.at(i); } delete [] ANN_outputs; + delete [] inputLayer; + delete [] outputLayer; + delete [] activation_function_types; }; void defineInputLayer(unsigned long n_neurons); void defineOutputLayer(unsigned long n_neurons); diff --git a/SU2_CFD/include/fluid/CDataDrivenFluid.hpp b/SU2_CFD/include/fluid/CDataDrivenFluid.hpp index 2866bcbebf8b..79e423d62152 100644 --- a/SU2_CFD/include/fluid/CDataDrivenFluid.hpp +++ b/SU2_CFD/include/fluid/CDataDrivenFluid.hpp @@ -30,6 +30,8 @@ #include "CFluidModel.hpp" #include "../../../Common/include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp" +#include "../../../Common/include/containers/CLookUpTable.hpp" + /*! 
* \class CDataDrivenFluid * \brief Template class for fluid model definition using multi-layer perceptrons for @@ -39,26 +41,35 @@ class CDataDrivenFluid : public CFluidModel { protected: - unsigned short Kind_DataDriven_Method = ENUM_DATADRIVEN_METHOD::LUT; - size_t idx_rho, - idx_e; + unsigned short Kind_DataDriven_Method = ENUM_DATADRIVEN_METHOD::LUT; // Interpolation method for data set evaluation - su2double Newton_Relaxation, - rho_start, - e_start; + size_t idx_rho, // Interpolator index for density input + idx_e; // Interpolator index for energy input - string input_filename; + su2double Newton_Relaxation, // Relaxation factor for Newton solvers. + rho_start, // Initial value for the density in Newton solver processes. + e_start; // Initial value for the energy in Newton solver processes. - vector input_names_rhoe, - output_names_rhoe; + string input_filename; // Data-driven method input file name. + + su2double dsde_rho, dsdrho_e, d2sde2, d2sdedrho, d2sdrho2; + + su2double Gamma{0.0}; /*!< \brief Ratio of Specific Heats. */ + su2double Gamma_Minus_One{0.0}; /*!< \brief Ratio of Specific Heats Minus One. */ + su2double Gas_Constant{0.0}; /*!< \brief Gas Constant. */ + + vector input_names_rhoe, // Data-driven method input variable names of the independent variables (density, energy). + output_names_rhoe; // Output variable names listed in the data-driven method input file name. - vector outputs_rhoe; + vector outputs_rhoe; // Pointers to output variables. - MLPToolbox::CLookUp_ANN * lookup_mlp; - MLPToolbox::CIOMap * iomap_rhoe; - vector MLP_inputs; + /*--- Class variables for the multi-layer perceptron method ---*/ + MLPToolbox::CLookUp_ANN * lookup_mlp; // multi-layer perceptron collection. + MLPToolbox::CIOMap * iomap_rhoe; // input-output map. + vector MLP_inputs; // inputs for the multi-layer perceptron look-up operation. 
- + /*--- Class variables for the look-up table method ---*/ + CLookUpTable *lookup_table; void MapInputs_to_Outputs(); @@ -71,29 +82,49 @@ class CDataDrivenFluid : public CFluidModel { ~CDataDrivenFluid(); /*! * \brief Set the Dimensionless State using Density and Internal Energy - * \param[in] rho - first thermodynamic variable. - * \param[in] e - second thermodynamic variable. + * \param[in] rho - first thermodynamic variable (density). + * \param[in] e - second thermodynamic variable (static energy). */ void SetTDState_rhoe(su2double rho, su2double e) override; /*! * \brief Set the Dimensionless State using Pressure and Temperature - * \param[in] P - first thermodynamic variable. - * \param[in] T - second thermodynamic variable. + * \param[in] P - first thermodynamic variable (pressure). + * \param[in] T - second thermodynamic variable (temperature). */ void SetTDState_PT(su2double P, su2double T) override; /*! * \brief Set the Dimensionless State using Pressure and Density - * \param[in] P - first thermodynamic variable. - * \param[in] rho - second thermodynamic variable. + * \param[in] P - first thermodynamic variable (pressure). + * \param[in] rho - second thermodynamic variable (density). */ void SetTDState_Prho(su2double P, su2double rho) override; /*! * \brief Set the Dimensionless Internal Energy using Pressure and Density - * \param[in] P - first thermodynamic variable. - * \param[in] rho - second thermodynamic variable. + * \param[in] P - first thermodynamic variable (pressure). + * \param[in] rho - second thermodynamic variable (density). */ void SetEnergy_Prho(su2double P, su2double rho) override; + + /*! + * \brief Set the Dimensionless Internal Energy using Pressure and Density + * \param[in] rho - second thermodynamic variable (density). + */ + void SetTDState_rhoT(su2double rho, su2double T) override; + + /*! + * \brief Set the Dimensionless State using Enthalpy and Entropy + * \param[in] h - first thermodynamic variable (h). 
+ * \param[in] s - second thermodynamic variable (s). + */ + void SetTDState_hs(su2double h, su2double s) override; + + /*! + * \brief Set the Dimensionless State using Pressure and Entropy + * \param[in] P - first thermodynamic variable (P). + * \param[in] s - second thermodynamic variable (s). + */ + void SetTDState_Ps(su2double P, su2double s) override; }; diff --git a/SU2_CFD/src/fluid/CDataDrivenFluid.cpp b/SU2_CFD/src/fluid/CDataDrivenFluid.cpp index 4f4b7e957902..7dafd4bf5e62 100644 --- a/SU2_CFD/src/fluid/CDataDrivenFluid.cpp +++ b/SU2_CFD/src/fluid/CDataDrivenFluid.cpp @@ -44,6 +44,9 @@ CDataDrivenFluid::CDataDrivenFluid(const CConfig* config) : CFluidModel() { case ENUM_DATADRIVEN_METHOD::MLP: lookup_mlp = new MLPToolbox::CLookUp_ANN(input_filename); break; + case ENUM_DATADRIVEN_METHOD::LUT: + lookup_table = new CLookUpTable(input_filename, "Density", "Energy"); + break; default: break; } @@ -66,7 +69,9 @@ CDataDrivenFluid::~CDataDrivenFluid(){ delete iomap_rhoe; delete lookup_mlp; break; - + case ENUM_DATADRIVEN_METHOD::LUT: + delete lookup_table; + break; default: break; } @@ -80,23 +85,19 @@ void CDataDrivenFluid::MapInputs_to_Outputs(){ input_names_rhoe.push_back("Energy"); idx_e = 1; - /*--- Required outputs for the interpolation method are primary and secondary thermodynamic variables. 
---*/ - output_names_rhoe.push_back("Temperature"); - outputs_rhoe.push_back(&Temperature); - output_names_rhoe.push_back("Pressure"); - outputs_rhoe.push_back(&Pressure); - output_names_rhoe.push_back("SoundSpeed2"); - outputs_rhoe.push_back(&SoundSpeed2); - output_names_rhoe.push_back("dPdrho_e"); - outputs_rhoe.push_back(&dPdrho_e); - output_names_rhoe.push_back("dPde_rho"); - outputs_rhoe.push_back(&dPde_rho); - output_names_rhoe.push_back("dTdrho_e"); - outputs_rhoe.push_back(&dTdrho_e); - output_names_rhoe.push_back("dTde_rho"); - outputs_rhoe.push_back(&dTde_rho); - output_names_rhoe.push_back("Entropy"); + /*--- Required outputs for the interpolation method are entropy and its partial derivatives with respect to energy and density ---*/ + output_names_rhoe.push_back("s"); outputs_rhoe.push_back(&Entropy); + output_names_rhoe.push_back("dsde_rho"); + outputs_rhoe.push_back(&dsde_rho); + output_names_rhoe.push_back("dsdrho_e"); + outputs_rhoe.push_back(&dsdrho_e); + output_names_rhoe.push_back("d2sde2"); + outputs_rhoe.push_back(&d2sde2); + output_names_rhoe.push_back("d2sdedrho"); + outputs_rhoe.push_back(&d2sdedrho); + output_names_rhoe.push_back("d2sdrho2"); + outputs_rhoe.push_back(&d2sdrho2); /*--- Further preprocessing of input and output variables ---*/ switch (Kind_DataDriven_Method) @@ -125,15 +126,37 @@ void CDataDrivenFluid::SetTDState_rhoe(su2double rho, su2double e) { /* --- Evaluate MLP --- */ lookup_mlp->Predict_ANN(iomap_rhoe, MLP_inputs, outputs_rhoe); break; - + case ENUM_DATADRIVEN_METHOD::LUT: + lookup_table->LookUp_ProgEnth(output_names_rhoe, outputs_rhoe, rho, e, "Density", "Energy"); default: break; } + su2double blue_term = (dsdrho_e * (2 - rho * pow(dsde_rho, -1) * d2sdedrho) + rho * d2sdrho2); + su2double green_term = (- pow(dsde_rho, -1) * d2sde2 * dsdrho_e + d2sdedrho); + + SoundSpeed2 = - rho * pow(dsde_rho, -1) * (blue_term - rho * green_term * (dsdrho_e / dsde_rho)); + + Temperature = 1.0 / dsde_rho; + Pressure = -pow(rho, 2) * 
Temperature * dsdrho_e; + Density = rho; + StaticEnergy = e; + + dTde_rho = -pow(dsde_rho, -2)*d2sde2; + dTdrho_e = 0.0; + + dPde_rho = -pow(rho, 2) * dTde_rho * dsdrho_e; + dPdrho_e = -2*rho*Temperature*dsdrho_e - pow(rho, 2)*Temperature*d2sdrho2; + + Cp = (1 / dTde_rho) * ( 1 + (1 / Density)*dPde_rho) + (1/Density)*(1/dTdrho_e)*(dPdrho_e - Pressure / Density); + Cv = 1 / dTde_rho; + Gamma = Cp / Cv; + Gamma_Minus_One = Gamma - 1; + Gas_Constant = Cp - Cv; + } void CDataDrivenFluid::SetTDState_PT(su2double P, su2double T) { - /*--- Newton solver for */ /*--- Setting initial values for density and energy ---*/ su2double rho = rho_start, @@ -152,6 +175,7 @@ void CDataDrivenFluid::SetTDState_PT(su2double P, su2double T) { delta_e, // Energy step size determinant; // Jacobian determinant + /*--- Initiating Newton solver ---*/ while(!converged && (Iter < iter_max)){ /*--- Determine thermodynamic state based on current density and energy*/ @@ -220,8 +244,8 @@ void CDataDrivenFluid::SetEnergy_Prho(su2double P, su2double rho) { /*--- Compute step size for energy ---*/ delta_e = delta_P / dPde_rho; - /*--- Update density and energy values ---*/ - e -= Newton_Relaxation * delta_e; + /*--- energy value ---*/ + e += Newton_Relaxation * delta_e; } Iter ++; } @@ -229,3 +253,148 @@ void CDataDrivenFluid::SetEnergy_Prho(su2double P, su2double rho) { /*--- Setting static energy as the converged value ---*/ StaticEnergy = e; } + +void CDataDrivenFluid::SetTDState_rhoT(su2double rho, su2double T) { + + /*--- Setting initial values for density and energy ---*/ + su2double e = e_start; + + su2double tolerance_T = 1; // Tolerance for temperature solution + + bool converged = false; // Convergence flag + unsigned long iter_max = 1000, // Maximum number of iterations + Iter = 0; + + su2double delta_T, // Temperature residual + delta_e; // Energy increment + + /*--- Initiating Newton solver ---*/ + while(!converged && (Iter < iter_max)){ + + /*--- Determine thermodynamic state based on 
current density and energy*/ + SetTDState_rhoe(rho ,e); + + /*--- Determine temperature residual ---*/ + delta_T = Temperature - T; + + /*--- Continue iterative process if residuals are outside tolerances ---*/ + if((abs(delta_T) < tolerance_T)){ + converged = true; + }else{ + + + delta_e = delta_T / dTde_rho; + + /*--- Update energy value ---*/ + e += Newton_Relaxation * delta_e; + } + Iter ++; + } + + /*--- Calculate thermodynamic state based on converged values for density and energy ---*/ + SetTDState_rhoe(rho, e); +} + +void CDataDrivenFluid::SetTDState_hs(su2double h, su2double s) { + + /*--- Setting initial values for density and energy ---*/ + su2double rho = rho_start, + e = e_start; + + su2double tolerance_h = 10, // Tolerance for enthalpy solution + tolerance_s = 1; // Tolerance for entropy solution + + bool converged = false; // Convergence flag + unsigned long iter_max = 1000, // Maximum number of iterations + Iter = 0; + + su2double delta_h, // Enthalpy + delta_s, // Entropy residual + delta_rho, // Density step size + delta_e, // Energy step size + determinant; // Jacobian determinant + + /*--- Initiating Newton solver ---*/ + while(!converged && (Iter < iter_max)){ + + /*--- Determine thermodynamic state based on current density and energy*/ + SetTDState_rhoe(rho ,e); + + su2double Enthalpy = e + Pressure / rho; + /*--- Determine pressure and temperature residuals ---*/ + delta_h = Enthalpy - h; + delta_s = Entropy - s; + + /*--- Continue iterative process if residuals are outside tolerances ---*/ + if((abs(delta_h) < tolerance_h) && (abs(delta_s) < tolerance_s)){ + converged = true; + }else{ + + su2double dh_de = 1 + dPde_rho / rho; + su2double dh_drho = -Pressure*pow(rho, -2) + dPdrho_e / rho; + + determinant = dh_drho * dsde_rho - dh_de * dsdrho_e; + delta_rho = (dsde_rho * delta_h - dh_de * delta_s) / determinant; + delta_e = (-dsdrho_e * delta_h + dh_drho * delta_s) / determinant; + + /*--- Update density and energy values ---*/ + rho -= 
Newton_Relaxation * delta_rho; + e -= Newton_Relaxation * delta_e; + } + Iter ++; + } + + /*--- Calculate thermodynamic state based on converged values for density and energy ---*/ + SetTDState_rhoe(rho, e); +} + +void CDataDrivenFluid::SetTDState_Ps(su2double P, su2double s) { + + /*--- Setting initial values for density and energy ---*/ + su2double rho = rho_start, + e = e_start; + + su2double tolerance_P = 10, // Tolerance for pressure solution + tolerance_s = 1; // Tolerance for entropy solution + + bool converged = false; // Convergence flag + unsigned long iter_max = 1000, // Maximum number of iterations + Iter = 0; + + su2double delta_P, // Enthalpy + delta_s, // Entropy residual + delta_rho, // Density step size + delta_e, // Energy step size + determinant; // Jacobian determinant + + /*--- Initiating Newton solver ---*/ + while(!converged && (Iter < iter_max)){ + + /*--- Determine thermodynamic state based on current density and energy*/ + SetTDState_rhoe(rho ,e); + + /*--- Determine pressure and temperature residuals ---*/ + delta_P = Pressure - P; + delta_s = Entropy - s; + + /*--- Continue iterative process if residuals are outside tolerances ---*/ + if((abs(delta_P) < tolerance_P) && (abs(delta_s) < tolerance_s)){ + converged = true; + }else{ + + /*--- Compute step size for density and energy ---*/ + determinant = dPdrho_e * dsde_rho - dPde_rho * dsdrho_e; + + delta_rho = (dsde_rho * delta_P - dPde_rho * delta_s) / determinant; + delta_e = (-dsdrho_e * delta_P + dPdrho_e * delta_s) / determinant; + + /*--- Update density and energy values ---*/ + rho -= Newton_Relaxation * delta_rho; + e -= Newton_Relaxation * delta_e; + } + Iter ++; + } + + /*--- Calculate thermodynamic state based on converged values for density and energy ---*/ + SetTDState_rhoe(rho, e); +} \ No newline at end of file From 76194ecc4154ee6569a3420178c5af801478bfc9 Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Tue, 22 Nov 2022 14:30:22 +0100 Subject: [PATCH 11/84] Improved 
Newton solver behavior in data driven fluid model --- .../multilayer_perceptron/CLayer.hpp | 3 -- .../multilayer_perceptron/CNeuralNetwork.hpp | 3 -- SU2_CFD/include/fluid/CDataDrivenFluid.hpp | 12 ++++++++ SU2_CFD/include/fluid/CFluidModel.hpp | 12 ++++++++ SU2_CFD/src/fluid/CCoolProp.cpp | 28 +++++++++++++++++++ SU2_CFD/src/fluid/CDataDrivenFluid.cpp | 12 ++++---- SU2_CFD/src/solvers/CEulerSolver.cpp | 5 +++- 7 files changed, 62 insertions(+), 13 deletions(-) diff --git a/Common/include/toolboxes/multilayer_perceptron/CLayer.hpp b/Common/include/toolboxes/multilayer_perceptron/CLayer.hpp index c72709fe52ea..925663128090 100644 --- a/Common/include/toolboxes/multilayer_perceptron/CLayer.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CLayer.hpp @@ -126,9 +126,6 @@ class CLayer */ string getActivationType(){return activation_type;} - ~CLayer(){ - delete [] neurons; - }; }; } diff --git a/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp index b58528069d8e..54eb19f3238e 100644 --- a/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp @@ -83,8 +83,6 @@ class CNeuralNetwork delete total_layers.at(i); } delete [] ANN_outputs; - delete [] inputLayer; - delete [] outputLayer; delete [] activation_function_types; }; void defineInputLayer(unsigned long n_neurons); @@ -101,7 +99,6 @@ class CNeuralNetwork unsigned long getNNeurons(unsigned long iLayer){; return total_layers.at(iLayer)->getNNeurons();} - //unsigned long getNNeurons(unsigned long iLayer, unsigned long iNeuron){return weights.at(iLayer).at(iNeuron).size();} void predict(std::vector &inputs); diff --git a/SU2_CFD/include/fluid/CDataDrivenFluid.hpp b/SU2_CFD/include/fluid/CDataDrivenFluid.hpp index 79e423d62152..deb893edd6bb 100644 --- a/SU2_CFD/include/fluid/CDataDrivenFluid.hpp +++ b/SU2_CFD/include/fluid/CDataDrivenFluid.hpp @@ -127,4 
+127,16 @@ class CDataDrivenFluid : public CFluidModel { * \param[in] s - second thermodynamic variable (s). */ void SetTDState_Ps(su2double P, su2double s) override; + + /*! + * \brief Set the initial guess for the density in Newton solvers + * \param[in] rho - Initial value for density. + */ + void SetDensity(su2double rho) override {rho_start = rho;} + + /*! + * \brief Set the initial guess for the static energy in Newton solvers + * \param[in] e - Initial value for static energy. + */ + void SetEnergy(su2double e) override {e_start = e;} }; diff --git a/SU2_CFD/include/fluid/CFluidModel.hpp b/SU2_CFD/include/fluid/CFluidModel.hpp index b57badc99c98..4a1bc47e4030 100644 --- a/SU2_CFD/include/fluid/CFluidModel.hpp +++ b/SU2_CFD/include/fluid/CFluidModel.hpp @@ -325,4 +325,16 @@ class CFluidModel { * \brief Set fluid eddy viscosity provided by a turbulence model needed for computing effective thermal conductivity. */ void SetEddyViscosity(su2double val_Mu_Turb) { Mu_Turb = val_Mu_Turb; } + + /*! + * \brief Set the initial guess for the density in Newton solvers + * \param[in] rho - Initial value for density. + */ + virtual void SetDensity(su2double rho) {} + + /*! + * \brief Set the initial guess for the static energy in Newton solvers + * \param[in] e - Initial value for static energy. 
+ */ + virtual void SetEnergy(su2double e) {} }; diff --git a/SU2_CFD/src/fluid/CCoolProp.cpp b/SU2_CFD/src/fluid/CCoolProp.cpp index 61dfb5840cb4..9121c8164ed5 100644 --- a/SU2_CFD/src/fluid/CCoolProp.cpp +++ b/SU2_CFD/src/fluid/CCoolProp.cpp @@ -37,6 +37,34 @@ CCoolProp::CCoolProp(string fluidname) : CFluidModel() { Pressure_Critical = fluid_entity->p_critical(); Temperature_Critical = fluid_entity->T_critical(); acentric_factor = fluid_entity->acentric_factor(); + + su2double p = 5e4; + su2double T = 250; + su2double p_max = 2e6; + su2double T_max = 600; + su2double delta_p = (p_max - p)/500; + su2double delta_T = (T_max - T)/500; + cout << "rho, e, s, dsde_rho, dsdrho_e, d2sde2, d2sdedrho, d2sdrho2, pressure, temperature, soundspeed2" << endl; + for(unsigned long i=0;i<500;i++){ + p = 5e4; + for(unsigned long j=0; j<500;j++){ + fluid_entity->update(CoolProp::PT_INPUTS, p, T); + cout << fluid_entity->rhomass() << ", " + << fluid_entity->umass() << ", " + << fluid_entity->smass() << ", " + << fluid_entity->first_partial_deriv(CoolProp::iSmass, CoolProp::iUmass, CoolProp::iDmass) << ", " + << fluid_entity->first_partial_deriv(CoolProp::iSmass, CoolProp::iDmass, CoolProp::iUmass) << ", " + << fluid_entity->second_partial_deriv(CoolProp::iSmass, CoolProp::iUmass, CoolProp::iDmass, CoolProp::iUmass, CoolProp::iDmass) << ", " + << fluid_entity->second_partial_deriv(CoolProp::iSmass, CoolProp::iUmass, CoolProp::iDmass, CoolProp::iDmass, CoolProp::iUmass) << ", " + << fluid_entity->second_partial_deriv(CoolProp::iSmass, CoolProp::iDmass, CoolProp::iUmass, CoolProp::iDmass, CoolProp::iUmass) << ", " + << p << ", " + << T << ", " + << pow(fluid_entity->speed_sound(), 2) << endl; + p += delta_p; + } + T += delta_T; + + } } CCoolProp::~CCoolProp() {} diff --git a/SU2_CFD/src/fluid/CDataDrivenFluid.cpp b/SU2_CFD/src/fluid/CDataDrivenFluid.cpp index 7dafd4bf5e62..b95f7c819862 100644 --- a/SU2_CFD/src/fluid/CDataDrivenFluid.cpp +++ b/SU2_CFD/src/fluid/CDataDrivenFluid.cpp @@ 
-60,6 +60,7 @@ CDataDrivenFluid::CDataDrivenFluid(const CConfig* config) : CFluidModel() { /*--- Set initial values for density and energy based on config options ---*/ rho_start = config->GetDensity_Init_DataDriven(); e_start = config->GetEnergy_Init_DataDriven(); + } CDataDrivenFluid::~CDataDrivenFluid(){ @@ -148,7 +149,7 @@ void CDataDrivenFluid::SetTDState_rhoe(su2double rho, su2double e) { dPde_rho = -pow(rho, 2) * dTde_rho * dsdrho_e; dPdrho_e = -2*rho*Temperature*dsdrho_e - pow(rho, 2)*Temperature*d2sdrho2; - Cp = (1 / dTde_rho) * ( 1 + (1 / Density)*dPde_rho) + (1/Density)*(1/dTdrho_e)*(dPdrho_e - Pressure / Density); + Cp = (1 / dTde_rho) * ( 1 + (1 / Density)*dPde_rho); Cv = 1 / dTde_rho; Gamma = Cp / Cv; Gamma_Minus_One = Gamma - 1; @@ -174,7 +175,6 @@ void CDataDrivenFluid::SetTDState_PT(su2double P, su2double T) { delta_rho, // Density step size delta_e, // Energy step size determinant; // Jacobian determinant - /*--- Initiating Newton solver ---*/ while(!converged && (Iter < iter_max)){ @@ -184,7 +184,6 @@ void CDataDrivenFluid::SetTDState_PT(su2double P, su2double T) { /*--- Determine pressure and temperature residuals ---*/ delta_P = Pressure - P; delta_T = Temperature - T; - /*--- Continue iterative process if residuals are outside tolerances ---*/ if((abs(delta_P) < tolerance_P) && (abs(delta_T) < tolerance_T)){ converged = true; @@ -199,6 +198,7 @@ void CDataDrivenFluid::SetTDState_PT(su2double P, su2double T) { /*--- Update density and energy values ---*/ rho -= Newton_Relaxation * delta_rho; e -= Newton_Relaxation * delta_e; + } Iter ++; } @@ -210,6 +210,7 @@ void CDataDrivenFluid::SetTDState_PT(su2double P, su2double T) { void CDataDrivenFluid::SetTDState_Prho(su2double P, su2double rho) { /*--- Computing static energy according to pressure and density ---*/ SetEnergy_Prho(P, rho); + /*--- Calculate thermodynamic state based on converged value for energy ---*/ SetTDState_rhoe(rho, StaticEnergy); @@ -245,7 +246,7 @@ void 
CDataDrivenFluid::SetEnergy_Prho(su2double P, su2double rho) { delta_e = delta_P / dPde_rho; /*--- energy value ---*/ - e += Newton_Relaxation * delta_e; + e -= Newton_Relaxation * delta_e; } Iter ++; } @@ -286,7 +287,7 @@ void CDataDrivenFluid::SetTDState_rhoT(su2double rho, su2double T) { delta_e = delta_T / dTde_rho; /*--- Update energy value ---*/ - e += Newton_Relaxation * delta_e; + e -= Newton_Relaxation * delta_e; } Iter ++; } @@ -343,7 +344,6 @@ void CDataDrivenFluid::SetTDState_hs(su2double h, su2double s) { } Iter ++; } - /*--- Calculate thermodynamic state based on converged values for density and energy ---*/ SetTDState_rhoe(rho, e); } diff --git a/SU2_CFD/src/solvers/CEulerSolver.cpp b/SU2_CFD/src/solvers/CEulerSolver.cpp index d2f07539d29c..1850649063e6 100644 --- a/SU2_CFD/src/solvers/CEulerSolver.cpp +++ b/SU2_CFD/src/solvers/CEulerSolver.cpp @@ -4658,11 +4658,14 @@ void CEulerSolver::BC_Riemann(CGeometry *geometry, CSolver **solver_container, P_Total /= config->GetPressure_Ref(); T_Total /= config->GetTemperature_Ref(); + /*--- Set initial values for density and energy for Newton solvers in fluid model ---*/ + GetFluidModel()->SetDensity(Density_i); + GetFluidModel()->SetEnergy(StaticEnergy_i); + /* --- Computes the total state --- */ GetFluidModel()->SetTDState_PT(P_Total, T_Total); Enthalpy_e = GetFluidModel()->GetStaticEnergy()+ GetFluidModel()->GetPressure()/GetFluidModel()->GetDensity(); Entropy_e = GetFluidModel()->GetEntropy(); - /* --- Compute the boundary state u_e --- */ Velocity2_e = Velocity2_i; if (nDim == 2){ From 68d02ae7036ef8fa5d93cfb6a6bc47aa9a280d4d Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Wed, 23 Nov 2022 12:17:38 +0100 Subject: [PATCH 12/84] Added unit test for multi-layer perceptron --- .../CLookUp_ANN_tests.cpp | 35 ++++++++++++++ .../multilayer_perceptron/mlp_collection.mlp | 5 ++ .../multilayer_perceptron/simple_mlp.mlp | 47 +++++++++++++++++++ 3 files changed, 87 insertions(+) create mode 100644 
UnitTests/Common/toolboxes/multilayer_perceptron/CLookUp_ANN_tests.cpp create mode 100644 UnitTests/Common/toolboxes/multilayer_perceptron/mlp_collection.mlp create mode 100644 UnitTests/Common/toolboxes/multilayer_perceptron/simple_mlp.mlp diff --git a/UnitTests/Common/toolboxes/multilayer_perceptron/CLookUp_ANN_tests.cpp b/UnitTests/Common/toolboxes/multilayer_perceptron/CLookUp_ANN_tests.cpp new file mode 100644 index 000000000000..13e3709f3f80 --- /dev/null +++ b/UnitTests/Common/toolboxes/multilayer_perceptron/CLookUp_ANN_tests.cpp @@ -0,0 +1,35 @@ +/*! + * \file CLookUp_ANN_tests.cpp + * \brief Unit tests for the multi-layer perceptron look-up classes. + * \author E. Bunschoten + * \version 7.4.0 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2022, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . 
+ */ + +#include "catch.hpp" +#include "../../../../Common/include/toolboxes/multilayer_perceptron/CLookUp_ANN.hpp" +#include "../../../../Common/include/toolboxes/multilayer_perceptron/CIOMap.hpp" + +TEST_CASE("LookUp ANN test", "[LookUpANN]"){ + CLookUp_ANN ANN("mlp_collection.mlp"); + +} diff --git a/UnitTests/Common/toolboxes/multilayer_perceptron/mlp_collection.mlp b/UnitTests/Common/toolboxes/multilayer_perceptron/mlp_collection.mlp new file mode 100644 index 000000000000..620e796825e5 --- /dev/null +++ b/UnitTests/Common/toolboxes/multilayer_perceptron/mlp_collection.mlp @@ -0,0 +1,5 @@ +[number of MLP files] +1 + +[MLP file names] +simple_mlp.mlp diff --git a/UnitTests/Common/toolboxes/multilayer_perceptron/simple_mlp.mlp b/UnitTests/Common/toolboxes/multilayer_perceptron/simple_mlp.mlp new file mode 100644 index 000000000000..01fb20bd102e --- /dev/null +++ b/UnitTests/Common/toolboxes/multilayer_perceptron/simple_mlp.mlp @@ -0,0 +1,47 @@ +
+ +[number of layers] +3 + +[neurons per layer] +2 +4 +1 + +[activation function] +linear +elu +linear + +[input names] +x +y + +[input normalization] ++0.0000000000000000e+00 +2.0000000000000000e+00 +-1.0000000000000000e+00 +0.0000000000000000e+00 + +[output names] +z + +[output normalization] +-1.0000000000000000e+00 +9.9998769302960888e-01 + +
+ +[weights per layer] + +-4.1729342937469482e-01 -2.2465672492980957e+00 +1.1258139610290527e+00 -7.0158332586288452e-01 ++1.1477893590927124e+00 +2.5089375674724579e-02 +2.6767715811729431e-01 +1.0031684637069702e+00 + + ++2.5062826275825500e-01 +-6.2471812963485718e-01 +-2.0578651130199432e-01 ++2.6085931062698364e-01 + + +[biases per layer] ++0.0000000000000000e+00 +0.0000000000000000e+00 +0.0000000000000000e+00 ++7.5714819133281708e-02 +7.7316933870315552e-01 -4.4279521703720093e-01 +4.1041103005409241e-01 ++2.8878501057624817e-01 From 1a40d51afd58dd01d28189e06c9337010d850af9 Mon Sep 17 00:00:00 2001 From: EvertBunschoten Date: Wed, 23 Nov 2022 15:39:36 +0100 Subject: [PATCH 13/84] Added MLP test case configuration file and unit test --- .../multilayer_perceptron/CNeuralNetwork.hpp | 184 +++++++++++++++--- .../CReadNeuralNetwork.hpp | 10 +- SU2_CFD/src/fluid/CCoolProp.cpp | 28 --- SU2_CFD/src/fluid/CDataDrivenFluid.cpp | 5 +- TestCases/nicf/coolprop/coolprop_nozzle.cfg | 2 +- .../nicf/datadriven/datadriven_nozzle.cfg | 128 ++++++++++++ .../CLookUp_ANN_tests.cpp | 36 +++- .../multilayer_perceptron/mlp_collection.mlp | 2 +- UnitTests/meson.build | 1 + 9 files changed, 330 insertions(+), 66 deletions(-) create mode 100644 TestCases/nicf/datadriven/datadriven_nozzle.cfg diff --git a/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp index 54eb19f3238e..c5aa90c30fc9 100644 --- a/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp +++ b/Common/include/toolboxes/multilayer_perceptron/CNeuralNetwork.hpp @@ -41,27 +41,28 @@ namespace MLPToolbox{ class CNeuralNetwork { private: - std::vector input_names, - output_names; + std::vector input_names, // MLP input variable names. + output_names; // MLP output variable names. - unsigned long n_hidden_layers; + unsigned long n_hidden_layers; // Number of hidden layers (layers between input and output layer). 
- CLayer *inputLayer, - *outputLayer; + CLayer *inputLayer, // Pointer to network input layer. + *outputLayer; // Pointer to network output layer. - std::vector hiddenLayers, - total_layers; + std::vector hiddenLayers, // Hidden layer collection. + total_layers; // Hidden layers plus in/output layers - //std::vector>> weights; - std::vector weights_mat; - std::vector> values; + std::vector weights_mat; // Weights of synapses connecting layers - std::vector> input_norm, - output_norm; - std::vector last_inputs; + std::vector> input_norm, // Normalization factors for network inputs + output_norm; // Normalization factors for network outputs - su2double* ANN_outputs; + std::vector last_inputs; // Inputs from previous lookup operation. Evaluation of the network + // is skipped if current inputs are the same as the last inputs. + su2double* ANN_outputs; // Pointer to network outputs + + /*--- Activation function types that are currently supported ---*/ enum ENUM_ACTIVATION_FUNCTION { NONE=0, LINEAR=1, @@ -79,46 +80,169 @@ class CNeuralNetwork public: CNeuralNetwork(); ~CNeuralNetwork(){ - for(std::size_t i=0; i