// DHP_PE_RA_FDM.h

#pragma once

#include <exception>
#include <string>
#include <mpi.h>
using std::exception;
using std::string;
// ==================================================================================================================================================
// DHP_PE_RA_FDM_Exception
// ==================================================================================================================================================
class DHP_PE_RA_FDM_Exception : public exception {
    public:
        DHP_PE_RA_FDM_Exception (const string& msg_) : msg(msg_) {}
        virtual ~DHP_PE_RA_FDM_Exception () throw() {}

        virtual const char* what () const throw() {
            return msg.c_str();
        }

    private:
        const string msg;
};
// ==================================================================================================================================================
// ProcParams
// ==================================================================================================================================================
struct ProcParams {
    int rank;
    int size;
    MPI_Comm comm;

    public:
        ProcParams (MPI_Comm comm_ = MPI_COMM_WORLD) {
            comm = comm_;
            MPI_Comm_rank (comm, &rank); // get current process id
            MPI_Comm_size (comm, &size); // get number of processes
        }
};
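// A ProcParams is typically constructed right after MPI_Init; a minimal illustrative sketch:
//
//     ProcParams pp (MPI_COMM_WORLD); // pp.rank and pp.size are filled in by the constructor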
// ==================================================================================================================================================
// ProcComputingCoords
// ==================================================================================================================================================
// Structure storing the coordinates of the computing area assigned to the current process
//
struct ProcComputingCoords {
    int x_proc_num;
    int y_proc_num;

    int x_cells_num;
    int x_cell_pos;
    int y_cells_num;
    int y_cell_pos;

    // Indicate whether the process touches the corresponding border of the grid
    bool top;
    bool bottom;
    bool left;
    bool right;

    ProcComputingCoords ();
    ProcComputingCoords (const ProcParams& procParams, const int grid_size_x, const int grid_size_y, const int x_proc_num_, const int y_proc_num_);

    void Dump (const string& fout_name) const;
};
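// Illustrative sketch of a regular block decomposition (an assumption about how the fields above might be filled in,
// not code taken from the library): a process with grid coordinate px (0 <= px < x_proc_num) could receive
//
//     x_cells_num = grid_size_x / x_proc_num + (px < grid_size_x % x_proc_num ? 1 : 0);
//     x_cell_pos  = px * (grid_size_x / x_proc_num) + std::min(px, grid_size_x % x_proc_num);
//
// and analogously along the y axis; 'top'/'bottom'/'left'/'right' are then set for the processes whose areas touch the grid border.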
// ==================================================================================================================================================
// DHP_PE_RA_FDM
// ==================================================================================================================================================
// This class solves the Dirichlet problem for Poisson's equation in a rectangular area, using "steepest descent iterations" for the first several
// iterations and "conjugate gradient iterations" afterwards.
//
// The method uses:
//      a five-point difference equation to approximate the Laplace operator
//      a regular grid decomposition
//      MPI for parallel computation on supercomputers
//      the scalar product (a, b) = \sum_{i=1}^{n-1} \sum_{j=1}^{m-1} h_i * h_j * a(i, j) * b(i, j) (a worked sketch follows this comment block)
//
// boundary conditions: function 'fi'
// right-hand side of Poisson's equation: function 'F'
// stopping criteria: function 'StopCriteria'
//
// Both the boundary conditions and the right-hand side must be defined in a derived class (see the illustrative usage sketch at the end of
// this header).
//
// In case of errors a 'DHP_PE_RA_FDM_Exception' can be thrown (any process can throw an exception; however, there is some guarantee that
// exceptions related to algorithmic problems, as opposed to MPI errors, will be thrown by every process).
//
// Dirichlet-Problem-Poisson's-Equation-Rectangular-Area-Finite-Difference-Method
//
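// A worked sketch of the discrete scalar product above, on a uniform grid where h_i = hx and h_j = hy (illustrative only;
// the names 'n', 'm', 'row_len' and the indexing are assumptions matching the comments in this header, not library code):
//
//     double scalar_product = 0;
//     for (int j = 1; j <= m - 1; j++)
//         for (int i = 1; i <= n - 1; i++)
//             scalar_product += hx * hy * a[j * row_len + i] * b[j * row_len + i];
//
// The boundary nodes (i = 0, i = n, j = 0, j = m) are excluded, matching 'ComputingScalarProduct' below, which ignores
// function values on the boundaries; in the MPI version each process would compute its local sum and the global value
// would presumably be combined across processes (e.g. with MPI_Allreduce).
//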
class DHP_PE_RA_FDM {
    public:
        DHP_PE_RA_FDM (const double x1, const double y1, const double x2, const double y2, const int grid_size_x_, const int grid_size_y_,
                       const double eps_, const int descent_step_iterations_ = 1);
        virtual ~DHP_PE_RA_FDM ();

        // Main function for solving the differential equation
        //
        // side effect: after the function finishes, each process holds its own part of the solution for the target function;
        // the solution can be found in 'double* p;' (see getSolutionPerProcess)
        //
        void Compute (const ProcParams& procParams_in, const int x_proc_num, const int y_proc_num);

        double* getSolutionPerProcess () const { return p; }
        int getIterationsCounter () const { return iterations_counter; }
        ProcParams getProcParams () const { return procParams; }
        ProcComputingCoords getProcCoords () const { return procCoords; }

        void Dump_func (const string& fout_name, const double* const f = NULL, const string& func_label = string("")) const;
        const double X1;
        const double Y1;
        const double X2;
        const double Y2;

        const double hx;
        const double hy;

        const int grid_size_x;
        const int grid_size_y;
        const double eps;
    protected:
        // right-hand side of Poisson's equation
        virtual double F (const double x, const double y) const = 0;
        // boundary conditions
        virtual double fi (const double x, const double y) const = 0;
        // stopping criterion (must return a true/false value on each process)
        virtual bool StopCriteria (const double* const f1, const double* const f2);

        MPI_Comm PrepareMPIComm (const ProcParams& procParams_in, const int x_proc_num, const int y_proc_num) const;
    private:
        // Applies the five-point difference approximation of the Laplace operator to the function f and stores the result in delta_f
        void Counting_5_star (double* const delta_f, const double* const f);
        // Computes the scalar product of two functions, ignoring function values on the boundaries
        //
        // return value: the global scalar product of the two functions, available on each of the processes
        //
        double ComputingScalarProduct (const double* const f1, const double* const f2);

        void Initialize_F_border_with_zero (double* const f);
        void Initialize_P_and_Pprev ();

        void Compute_r (double* const r, const double* const delta_p) const;
        void Compute_g (double* const g, const double* const r, const double alpha) const;
        void Compute_p (const double tau, const double* const g);

        // Debug ONLY!
        // void OutputBias (const double* const f);
        // Precomputed variables for speedup
        double hxhy;
        double hx2;
        double hy2;

        ProcParams procParams;
        ProcComputingCoords procCoords;

        int descent_step_iterations;
        int iterations_counter;

        // p is a double array
        //      p(i=x, j=y) = p[y * row_len + x]
        double* p;
        double* p_prev;
        // Although these variables are temporaries used only in the Counting_5_star and ComputingScalarProduct functions,
        // they are used too often to be reallocated on each iteration of the computing process, so I allocate them once
        // at the beginning of the computation
        double* send_message_lr;
        double* send_message_rl;
        double* send_message_td;
        double* send_message_bu;
        double* recv_message_lr;
        double* recv_message_rl;
        double* recv_message_td;
        double* recv_message_bu;

        MPI_Request* recv_reqs_5_star;
        MPI_Request* send_reqs_5_star;
        enum MPI_MessageTypes {
            StarLeftRight,
            StarRightLeft,
            StarTopDown,
            StarBottomUp,
            DumpSync
        };

        static const bool debug = false;
        // static const bool countBias = true;
        string debug_fname;
};
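
// ==================================================================================================================================================
// Usage example
// ==================================================================================================================================================
// The sketch below is illustrative only and is not part of the library: the class name 'MySolver', the chosen functions and all
// parameter values are assumptions. It shows one way a derived class might define the pure virtual functions 'F' and 'fi'
// (and optionally override 'StopCriteria'), and how 'Compute' might then be driven from an MPI program.
//
//     class MySolver : public DHP_PE_RA_FDM {
//         public:
//             MySolver (const int grid_size, const double eps)
//                 : DHP_PE_RA_FDM(0, 0, 1, 1, grid_size, grid_size, eps) {} // unit square [0, 1] x [0, 1]
//
//         protected:
//             // right-hand side of Poisson's equation (hypothetical choice)
//             virtual double F (const double x, const double y) const { return 2 * (x * x + y * y); }
//             // boundary conditions (hypothetical choice)
//             virtual double fi (const double x, const double y) const { return x * x * y * y; }
//     };
//
//     int main (int argc, char** argv) {
//         MPI_Init(&argc, &argv);
//
//         MySolver solver (1000, 1e-4);
//         // split the work among a 2 x 2 process grid (the 2 x 2 split is an assumption and presumably
//         // has to match the number of started MPI processes)
//         solver.Compute(ProcParams(MPI_COMM_WORLD), 2, 2);
//
//         double* my_part = solver.getSolutionPerProcess(); // this process's fragment of the solution
//         solver.Dump_func("solution.txt", my_part, "p");   // optionally dump it to a file
//
//         MPI_Finalize();
//         return 0;
//     }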