forked from geodynamics/seismic_cpml
-
Notifications
You must be signed in to change notification settings - Fork 0
/
seismic_CPML_3D_isotropic_MPI_OpenMP.f90
1510 lines (1215 loc) · 55.3 KB
/
seismic_CPML_3D_isotropic_MPI_OpenMP.f90
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
!
! SEISMIC_CPML Version 1.1.1, November 2009.
!
! Copyright Universite de Pau et des Pays de l'Adour, CNRS and INRIA, France.
! Contributor: Dimitri Komatitsch, komatitsch aT lma DOT cnrs-mrs DOT fr
!
! This software is a computer program whose purpose is to solve
! the three-dimensional isotropic elastic wave equation
! using a finite-difference method with Convolutional Perfectly Matched
! Layer (C-PML) conditions.
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 3 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
! The full text of the license is available in file "LICENSE".
program seismic_CPML_3D_iso_MPI_OpenMP
! 3D elastic finite-difference code in velocity and stress formulation
! with Convolutional-PML (C-PML) absorbing conditions.
! Dimitri Komatitsch, University of Pau, France, April 2007.
! The second-order staggered-grid formulation of Madariaga (1976) and Virieux (1986) is used.
! The C-PML implementation is based in part on formulas given in Roden and Gedney (2000).
!
! Parallel implementation based on both MPI and OpenMP.
! Type for instance "setenv OMP_NUM_THREADS 4" before running in OpenMP if you want 4 tasks.
!
! If you use this code for your own research, please cite some (or all) of these
! articles:
!
! @ARTICLE{MaKoEz08,
! author = {Roland Martin and Dimitri Komatitsch and Abdela\^aziz Ezziani},
! title = {An unsplit convolutional perfectly matched layer improved at grazing
! incidence for seismic wave equation in poroelastic media},
! journal = {Geophysics},
! year = {2008},
! volume = {73},
! pages = {T51-T61},
! number = {4},
! doi = {10.1190/1.2939484}}
!
! @ARTICLE{MaKo09,
! author = {Roland Martin and Dimitri Komatitsch},
! title = {An unsplit convolutional perfectly matched layer technique improved
! at grazing incidence for the viscoelastic wave equation},
! journal = {Geophysical Journal International},
! year = {2009},
! volume = {179},
! pages = {333-344},
! number = {1},
! doi = {10.1111/j.1365-246X.2009.04278.x}}
!
! @ARTICLE{MaKoGe08,
! author = {Roland Martin and Dimitri Komatitsch and Stephen D. Gedney},
! title = {A variational formulation of a stabilized unsplit convolutional perfectly
! matched layer for the isotropic or anisotropic seismic wave equation},
! journal = {Computer Modeling in Engineering and Sciences},
! year = {2008},
! volume = {37},
! pages = {274-304},
! number = {3}}
!
! @ARTICLE{KoMa07,
! author = {Dimitri Komatitsch and Roland Martin},
! title = {An unsplit convolutional {P}erfectly {M}atched {L}ayer improved
! at grazing incidence for the seismic wave equation},
! journal = {Geophysics},
! year = {2007},
! volume = {72},
! number = {5},
! pages = {SM155-SM167},
! doi = {10.1190/1.2757586}}
!
! The original CPML technique for Maxwell's equations is described in:
!
! @ARTICLE{RoGe00,
! author = {J. A. Roden and S. D. Gedney},
! title = {Convolution {PML} ({CPML}): {A}n Efficient {FDTD} Implementation
! of the {CFS}-{PML} for Arbitrary Media},
! journal = {Microwave and Optical Technology Letters},
! year = {2000},
! volume = {27},
! number = {5},
! pages = {334-339},
! doi = {10.1002/1098-2760(20001205)27:5 < 334::AID-MOP14>3.0.CO;2-A}}
! To display the results as color images in the selected 2D cut plane, use:
!
! " display image*.gif " or " gimp image*.gif "
!
! or
!
! " montage -geometry +0+3 -rotate 90 -tile 1x21 image*Vx*.gif allfiles_Vx.gif "
! " montage -geometry +0+3 -rotate 90 -tile 1x21 image*Vy*.gif allfiles_Vy.gif "
! then " display allfiles_Vx.gif " or " gimp allfiles_Vx.gif "
! then " display allfiles_Vy.gif " or " gimp allfiles_Vy.gif "
!
! IMPORTANT : all our CPML codes work fine in single precision as well (which is significantly faster).
! If you want you can thus force automatic conversion to single precision at compile time
! or change all the declarations and constants in the code from double precision to single.
implicit none
! header which contains standard MPI declarations
include 'mpif.h'
! total number of grid points in each direction of the grid
integer, parameter :: NX = 101
integer, parameter :: NY = 641
integer, parameter :: NZ = 640 ! even number in order to cut along Z axis
! number of processes used in the MPI run
! and local number of points (for simplicity we cut the mesh along Z only)
integer, parameter :: NPROC = 64
integer, parameter :: NZ_LOCAL = NZ / NPROC
! size of a grid cell
double precision, parameter :: DELTAX = 10.d0, ONE_OVER_DELTAX = 1.d0 / DELTAX
double precision, parameter :: DELTAY = DELTAX, DELTAZ = DELTAX
double precision, parameter :: ONE_OVER_DELTAY = ONE_OVER_DELTAX, ONE_OVER_DELTAZ = ONE_OVER_DELTAX
! P-velocity, S-velocity and density
double precision, parameter :: cp = 3300.d0
double precision, parameter :: cs = cp / 1.732d0
double precision, parameter :: rho = 2800.d0
double precision, parameter :: mu = rho*cs*cs
double precision, parameter :: lambda = rho*(cp*cp - 2.d0*cs*cs)
double precision, parameter :: lambdaplustwomu = rho*cp*cp
! total number of time steps
integer, parameter :: NSTEP = 2500
! time step in seconds
double precision, parameter :: DELTAT = 1.6d-3
! parameters for the source
double precision, parameter :: f0 = 7.d0
double precision, parameter :: t0 = 1.20d0 / f0
double precision, parameter :: factor = 1.d7
! flags to add PML layers to the edges of the grid
logical, parameter :: USE_PML_XMIN = .true.
logical, parameter :: USE_PML_XMAX = .true.
logical, parameter :: USE_PML_YMIN = .true.
logical, parameter :: USE_PML_YMAX = .true.
logical, parameter :: USE_PML_ZMIN = .true.
logical, parameter :: USE_PML_ZMAX = .true.
! thickness of the PML layer in grid points
integer, parameter :: NPOINTS_PML = 10
! source
! Since we cut the domain into slices along the Z direction in order to implement MPI,
! we have to tell the code in which MPI slice of the mesh the source is,
! and inside that mesh slice we need to tell it at which iz grid point it is, in the slice, thus between 1 and NZ_LOCAL.
! Here in this demo code we put the source in the middle of the model in the Z direction,
! i.e. in NZ/2, which means putting it in the cut plane (i.e. only the processor for which
! rank == rank_cut_plane will do it, and it will put it in its last point along Z, in NZ_LOCAL.
! if one wants to put the source at another location, one can invert the formulas below
! and define the grid point (ISOURCE, JSOURCE) to use as:
! double precision, parameter :: xsource = ...put here the coordinate you want...
! double precision, parameter :: ysource = ...put here the coordinate you want...
! integer, parameter :: ISOURCE = xsource / DELTAX + 1
! integer, parameter :: JSOURCE = ysource / DELTAY + 1
integer, parameter :: ISOURCE = NX - 2*NPOINTS_PML - 1
integer, parameter :: JSOURCE = 2 * NY / 3 + 1
double precision, parameter :: xsource = (ISOURCE - 1) * DELTAX
double precision, parameter :: ysource = (JSOURCE - 1) * DELTAY
! angle of source force clockwise with respect to vertical (Y) axis
double precision, parameter :: ANGLE_FORCE = 135.d0
! receivers
integer, parameter :: NREC = 2
double precision, parameter :: xdeb = xsource - 100.d0 ! first receiver x in meters
double precision, parameter :: ydeb = 2300.d0 ! first receiver y in meters
double precision, parameter :: xfin = xsource ! last receiver x in meters
double precision, parameter :: yfin = 300.d0 ! last receiver y in meters
! display information on the screen from time to time
integer, parameter :: IT_DISPLAY = 100
! value of PI
double precision, parameter :: PI = 3.141592653589793238462643d0
! conversion from degrees to radians
double precision, parameter :: DEGREES_TO_RADIANS = PI / 180.d0
! zero
double precision, parameter :: ZERO = 0.d0
! large value for maximum
double precision, parameter :: HUGEVAL = 1.d+30
! velocity threshold above which we consider that the code became unstable
double precision, parameter :: STABILITY_THRESHOLD = 1.d+25
! power to compute d0 profile
double precision, parameter :: NPOWER = 2.d0
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-11
double precision, parameter :: K_MAX_PML = 1.d0
double precision, parameter :: ALPHA_MAX_PML = 2.d0*PI*(f0/2.d0) ! from Festa and Vilotte
! arrays for the memory variables
! could declare these arrays in PML only to save a lot of memory, but proof of concept only here
double precision, dimension(NX,NY,NZ_LOCAL) :: &
memory_dvx_dx, &
memory_dvx_dy, &
memory_dvx_dz, &
memory_dvy_dx, &
memory_dvy_dy, &
memory_dvy_dz, &
memory_dvz_dx, &
memory_dvz_dy, &
memory_dvz_dz, &
memory_dsigmaxx_dx, &
memory_dsigmayy_dy, &
memory_dsigmazz_dz, &
memory_dsigmaxy_dx, &
memory_dsigmaxy_dy, &
memory_dsigmaxz_dx, &
memory_dsigmaxz_dz, &
memory_dsigmayz_dy, &
memory_dsigmayz_dz
double precision :: &
value_dvx_dx, &
value_dvx_dy, &
value_dvx_dz, &
value_dvy_dx, &
value_dvy_dy, &
value_dvy_dz, &
value_dvz_dx, &
value_dvz_dy, &
value_dvz_dz, &
value_dsigmaxx_dx, &
value_dsigmayy_dy, &
value_dsigmazz_dz, &
value_dsigmaxy_dx, &
value_dsigmaxy_dy, &
value_dsigmaxz_dx, &
value_dsigmaxz_dz, &
value_dsigmayz_dy, &
value_dsigmayz_dz
! 1D arrays for the damping profiles
double precision, dimension(NX) :: d_x,K_x,alpha_x,a_x,b_x,d_x_half,K_x_half,alpha_x_half,a_x_half,b_x_half
double precision, dimension(NY) :: d_y,K_y,alpha_y,a_y,b_y,d_y_half,K_y_half,alpha_y_half,a_y_half,b_y_half
double precision, dimension(NZ) :: d_z,K_z,alpha_z,a_z,b_z,d_z_half,K_z_half,alpha_z_half,a_z_half,b_z_half
! PML
double precision thickness_PML_x,thickness_PML_y,thickness_PML_z
double precision xoriginleft,xoriginright,yoriginbottom,yorigintop,zoriginbottom,zorigintop
double precision Rcoef,d0_x,d0_y,d0_z,xval,yval,zval,abscissa_in_PML,abscissa_normalized
! change dimension of Z axis to add two planes for MPI
double precision, dimension(NX,NY,0:NZ_LOCAL+1) :: vx,vy,vz,sigmaxx,sigmayy,sigmazz,sigmaxy,sigmaxz,sigmayz
integer, parameter :: number_of_arrays = 9 + 2*9
! for the source
double precision a,t,force_x,force_y,source_term
! for receivers
double precision xspacerec,yspacerec,distval,dist
integer, dimension(NREC) :: ix_rec,iy_rec
double precision, dimension(NREC) :: xrec,yrec
! for seismograms
double precision, dimension(NSTEP,NREC) :: sisvx,sisvy
! for evolution of total energy in the medium
double precision :: epsilon_xx,epsilon_yy,epsilon_zz,epsilon_xy,epsilon_xz,epsilon_yz
double precision :: total_energy_kinetic,total_energy_potential
double precision, dimension(NSTEP) :: total_energy
integer :: irec
! precompute some parameters once and for all
double precision, parameter :: DELTAT_lambda = DELTAT*lambda
double precision, parameter :: DELTAT_mu = DELTAT*mu
double precision, parameter :: DELTAT_lambdaplus2mu = DELTAT*lambdaplustwomu
double precision, parameter :: DELTAT_over_rho = DELTAT/rho
integer :: i,j,k,it
double precision :: Vsolidnorm,Courant_number
! timer to count elapsed time
character(len=8) datein
character(len=10) timein
character(len=5) :: zone
integer, dimension(8) :: time_values
integer ihours,iminutes,iseconds,int_tCPU
double precision :: time_start,time_end,tCPU
! names of the time stamp files
character(len=150) outputname
! main I/O file
integer, parameter :: IOUT = 41
! array needed for MPI_RECV
integer, dimension(MPI_STATUS_SIZE) :: message_status
! tag of the message to send
integer, parameter :: message_tag = 0
! number of values to send or receive
integer, parameter :: number_of_values = NX*NY
integer :: nb_procs,rank,code,rank_cut_plane,kmin,kmax,kglobal,offset_k,k2begin,kminus1end
integer :: sender_right_shift,receiver_right_shift,sender_left_shift,receiver_left_shift
!---
!--- program starts here
!---
! start MPI processes
call MPI_INIT(code)
! get total number of MPI processes in variable nb_procs
call MPI_COMM_SIZE(MPI_COMM_WORLD, nb_procs, code)
! get the rank of our process from 0 (master) to nb_procs-1 (workers)
call MPI_COMM_RANK(MPI_COMM_WORLD, rank, code)
! slice number for the cut plane in the middle of the mesh
! (the mesh is cut along Z into nb_procs slices; the slice just below the
! mid-plane handles the source, the screen output and the seismograms)
rank_cut_plane = nb_procs/2 - 1
! only the cut-plane process prints the banner and the model size
! (fix: the third model-size line wrongly said "along Y" for the Z dimension)
if (rank == rank_cut_plane) then
print *
print *,'3D elastic finite-difference code in velocity and stress formulation with C-PML'
print *
! display size of the model
print *
print *,'NX = ',NX
print *,'NY = ',NY
print *,'NZ = ',NZ
print *
print *,'NZ_LOCAL = ',NZ_LOCAL
print *,'NPROC = ',NPROC
print *
print *,'size of the model along X = ',(NX - 1) * DELTAX
print *,'size of the model along Y = ',(NY - 1) * DELTAY
print *,'size of the model along Z = ',(NZ - 1) * DELTAZ
print *
print *,'Total number of grid points = ',NX * NY * NZ
print *,'Number of points of all the arrays = ',dble(NX)*dble(NY)*dble(NZ)*number_of_arrays
print *,'Size in GB of all the arrays = ',dble(NX)*dble(NY)*dble(NZ)*number_of_arrays*8.d0/(1024.d0*1024.d0*1024.d0)
print *
print *,'In each slice:'
print *
print *,'Total number of grid points = ',NX * NY * NZ_LOCAL
print *,'Number of points of the arrays = ',dble(NX)*dble(NY)*dble(NZ_LOCAL)*number_of_arrays
print *,'Size in GB of the arrays = ',dble(NX)*dble(NY)*dble(NZ_LOCAL)*number_of_arrays*8.d0/(1024.d0*1024.d0*1024.d0)
print *
endif
! check that code was compiled with the right number of slices
if (nb_procs /= NPROC) then
print *,'nb_procs,NPROC = ',nb_procs,NPROC
stop 'nb_procs must be equal to NPROC'
endif
! we restrict ourselves to an even number of slices
! in order to have a cut plane in the middle of the mesh for visualization purposes
if (mod(nb_procs,2) /= 0) stop 'nb_procs must be even'
! check that we can cut along Z in an exact number of slices
if (mod(NZ,nb_procs) /= 0) stop 'NZ must be a multiple of nb_procs'
! check that a slice is at least as thick as a PML layer
! (note: the test passes when NZ_LOCAL == NPOINTS_PML, i.e. "at least as thick")
if (NZ_LOCAL < NPOINTS_PML) stop 'NZ_LOCAL must be greater than NPOINTS_PML'
! offset of this slice when we cut along Z
! global k index of a point = local k + offset_k
offset_k = rank * NZ_LOCAL
!--- define profile of absorption in PML region
! thickness of the PML layer in meters
thickness_PML_x = NPOINTS_PML * DELTAX
thickness_PML_y = NPOINTS_PML * DELTAY
thickness_PML_z = NPOINTS_PML * DELTAZ
! reflection coefficient (INRIA report section 6.1) http://hal.inria.fr/docs/00/07/32/19/PDF/RR-3471.pdf
! target amplitude of the residual reflection off the PML outer boundary
Rcoef = 0.001d0
! check that NPOWER is okay
if (NPOWER < 1) stop 'NPOWER must be greater than 1'
! compute d0 from INRIA report section 6.1 http://hal.inria.fr/docs/00/07/32/19/PDF/RR-3471.pdf
! d0 is the maximum damping value, reached at the outer edge of each PML layer
d0_x = - (NPOWER + 1) * cp * log(Rcoef) / (2.d0 * thickness_PML_x)
d0_y = - (NPOWER + 1) * cp * log(Rcoef) / (2.d0 * thickness_PML_y)
d0_z = - (NPOWER + 1) * cp * log(Rcoef) / (2.d0 * thickness_PML_z)
if (rank == rank_cut_plane) then
print *
print *,'d0_x = ',d0_x
print *,'d0_y = ',d0_y
print *,'d0_z = ',d0_z
endif
! PML
! reset every 1D damping-profile array to its "no PML" value:
! zero damping d, zero frequency shift alpha, zero convolution coefficient a,
! and unit grid-stretching factor K (the b arrays are filled in the loops below)
d_x(:) = ZERO;     d_y(:) = ZERO;     d_z(:) = ZERO
d_x_half(:) = ZERO;  d_y_half(:) = ZERO;  d_z_half(:) = ZERO
K_x(:) = 1.d0;     K_y(:) = 1.d0;     K_z(:) = 1.d0
K_x_half(:) = 1.d0;  K_y_half(:) = 1.d0;  K_z_half(:) = 1.d0
alpha_x(:) = ZERO;   alpha_y(:) = ZERO;   alpha_z(:) = ZERO
alpha_x_half(:) = ZERO;  alpha_y_half(:) = ZERO;  alpha_z_half(:) = ZERO
a_x(:) = ZERO;     a_y(:) = ZERO;     a_z(:) = ZERO
a_x_half(:) = ZERO;  a_y_half(:) = ZERO;  a_z_half(:) = ZERO
! damping in the X direction
! origin of the PML layer (position of right edge minus thickness, in meters)
xoriginleft = thickness_PML_x
xoriginright = (NX-1)*DELTAX - thickness_PML_x
! build the polynomial (degree NPOWER) damping profiles d, the grid-stretching
! factors K and the frequency shifts alpha, both at the grid points and at the
! staggered half-grid points, then the CPML recursion coefficients b and a
do i = 1,NX
! abscissa of current grid point along the damping profile
xval = DELTAX * dble(i-1)
!---------- xmin edge
if (USE_PML_XMIN) then
! define damping profile at the grid points
abscissa_in_PML = xoriginleft - xval
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_x
d_x(i) = d0_x * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_x(i) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_x(i) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
! define damping profile at half the grid points
abscissa_in_PML = xoriginleft - (xval + DELTAX/2.d0)
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_x
d_x_half(i) = d0_x * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_x_half(i) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_x_half(i) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
endif
!---------- xmax edge
if (USE_PML_XMAX) then
! define damping profile at the grid points
abscissa_in_PML = xval - xoriginright
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_x
d_x(i) = d0_x * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_x(i) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_x(i) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
! define damping profile at half the grid points
abscissa_in_PML = xval + DELTAX/2.d0 - xoriginright
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_x
d_x_half(i) = d0_x * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_x_half(i) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_x_half(i) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
endif
! clamp alpha to be non-negative, just in case (it should already be >= 0
! inside the PML since abscissa_normalized is in [0,1] there)
if (alpha_x(i) < ZERO) alpha_x(i) = ZERO
if (alpha_x_half(i) < ZERO) alpha_x_half(i) = ZERO
! CPML recursive-convolution coefficients (Roden and Gedney, 2000)
b_x(i) = exp(- (d_x(i) / K_x(i) + alpha_x(i)) * DELTAT)
b_x_half(i) = exp(- (d_x_half(i) / K_x_half(i) + alpha_x_half(i)) * DELTAT)
! this to avoid division by zero outside the PML
if (abs(d_x(i)) > 1.d-6) a_x(i) = d_x(i) * (b_x(i) - 1.d0) / (K_x(i) * (d_x(i) + K_x(i) * alpha_x(i)))
if (abs(d_x_half(i)) > 1.d-6) a_x_half(i) = d_x_half(i) * &
(b_x_half(i) - 1.d0) / (K_x_half(i) * (d_x_half(i) + K_x_half(i) * alpha_x_half(i)))
enddo
! damping in the Y direction
! origin of the PML layer (position of right edge minus thickness, in meters)
yoriginbottom = thickness_PML_y
yorigintop = (NY-1)*DELTAY - thickness_PML_y
! same construction as the X direction: polynomial damping d, stretching K,
! frequency shift alpha at grid and half-grid points, then CPML coefficients b, a
do j = 1,NY
! abscissa of current grid point along the damping profile
yval = DELTAY * dble(j-1)
!---------- ymin edge
if (USE_PML_YMIN) then
! define damping profile at the grid points
abscissa_in_PML = yoriginbottom - yval
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_y
d_y(j) = d0_y * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_y(j) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_y(j) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
! define damping profile at half the grid points
abscissa_in_PML = yoriginbottom - (yval + DELTAY/2.d0)
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_y
d_y_half(j) = d0_y * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_y_half(j) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_y_half(j) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
endif
!---------- ymax edge
if (USE_PML_YMAX) then
! define damping profile at the grid points
abscissa_in_PML = yval - yorigintop
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_y
d_y(j) = d0_y * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_y(j) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_y(j) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
! define damping profile at half the grid points
abscissa_in_PML = yval + DELTAY/2.d0 - yorigintop
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_y
d_y_half(j) = d0_y * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_y_half(j) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_y_half(j) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
endif
! clamp alpha to be non-negative, just in case
! (added for consistency with the X-direction loop, which performs this clamp)
if (alpha_y(j) < ZERO) alpha_y(j) = ZERO
if (alpha_y_half(j) < ZERO) alpha_y_half(j) = ZERO
b_y(j) = exp(- (d_y(j) / K_y(j) + alpha_y(j)) * DELTAT)
b_y_half(j) = exp(- (d_y_half(j) / K_y_half(j) + alpha_y_half(j)) * DELTAT)
! this to avoid division by zero outside the PML
if (abs(d_y(j)) > 1.d-6) a_y(j) = d_y(j) * (b_y(j) - 1.d0) / (K_y(j) * (d_y(j) + K_y(j) * alpha_y(j)))
if (abs(d_y_half(j)) > 1.d-6) a_y_half(j) = d_y_half(j) * &
(b_y_half(j) - 1.d0) / (K_y_half(j) * (d_y_half(j) + K_y_half(j) * alpha_y_half(j)))
enddo
! damping in the Z direction
! origin of the PML layer (position of right edge minus thickness, in meters)
zoriginbottom = thickness_PML_z
zorigintop = (NZ-1)*DELTAZ - thickness_PML_z
! note: the Z profiles are built over the full global NZ range; each MPI slice
! later indexes them with the global index kglobal = k + offset_k
do k = 1,NZ
! abscissa of current grid point along the damping profile
zval = DELTAZ * dble(k-1)
!---------- zmin edge
if (USE_PML_ZMIN) then
! define damping profile at the grid points
abscissa_in_PML = zoriginbottom - zval
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_z
d_z(k) = d0_z * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_z(k) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_z(k) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
! define damping profile at half the grid points
abscissa_in_PML = zoriginbottom - (zval + DELTAZ/2.d0)
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_z
d_z_half(k) = d0_z * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_z_half(k) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_z_half(k) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
endif
!---------- zmax edge
if (USE_PML_ZMAX) then
! define damping profile at the grid points
abscissa_in_PML = zval - zorigintop
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_z
d_z(k) = d0_z * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_z(k) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_z(k) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
! define damping profile at half the grid points
abscissa_in_PML = zval + DELTAZ/2.d0 - zorigintop
if (abscissa_in_PML >= ZERO) then
abscissa_normalized = abscissa_in_PML / thickness_PML_z
d_z_half(k) = d0_z * abscissa_normalized**NPOWER
! from Stephen Gedney's unpublished class notes for class EE699, lecture 8, slide 8-2
K_z_half(k) = 1.d0 + (K_MAX_PML - 1.d0) * abscissa_normalized**NPOWER
alpha_z_half(k) = ALPHA_MAX_PML * (1.d0 - abscissa_normalized)
endif
endif
! clamp alpha to be non-negative, just in case
! (added for consistency with the X-direction loop, which performs this clamp)
if (alpha_z(k) < ZERO) alpha_z(k) = ZERO
if (alpha_z_half(k) < ZERO) alpha_z_half(k) = ZERO
b_z(k) = exp(- (d_z(k) / K_z(k) + alpha_z(k)) * DELTAT)
b_z_half(k) = exp(- (d_z_half(k) / K_z_half(k) + alpha_z_half(k)) * DELTAT)
! this to avoid division by zero outside the PML
if (abs(d_z(k)) > 1.d-6) a_z(k) = d_z(k) * (b_z(k) - 1.d0) / (K_z(k) * (d_z(k) + K_z(k) * alpha_z(k)))
if (abs(d_z_half(k)) > 1.d-6) a_z_half(k) = d_z_half(k) * &
(b_z_half(k) - 1.d0) / (K_z_half(k) * (d_z_half(k) + K_z_half(k) * alpha_z_half(k)))
enddo
! the cut-plane process prints the source position and places the receivers
if (rank == rank_cut_plane) then
! print position of the source
print *
print *,'Position of the source:'
print *
print *,'x = ',xsource
print *,'y = ',ysource
print *
! define location of receivers
! receivers are evenly spaced on the straight line from (xdeb,ydeb) to (xfin,yfin)
print *
print *,'There are ',nrec,' receivers'
print *
xspacerec = (xfin-xdeb) / dble(NREC-1)
yspacerec = (yfin-ydeb) / dble(NREC-1)
do irec=1,nrec
xrec(irec) = xdeb + dble(irec-1)*xspacerec
yrec(irec) = ydeb + dble(irec-1)*yspacerec
enddo
! find closest grid point for each receiver
! brute-force scan of the whole (i,j) plane; done once, so O(NX*NY) is fine
do irec=1,nrec
dist = HUGEVAL
do j = 1,NY
do i = 1,NX
distval = sqrt((DELTAX*dble(i-1) - xrec(irec))**2 + (DELTAY*dble(j-1) - yrec(irec))**2)
if (distval < dist) then
dist = distval
ix_rec(irec) = i
iy_rec(irec) = j
endif
enddo
enddo
print *,'receiver ',irec,' x_target,y_target = ',xrec(irec),yrec(irec)
print *,'closest grid point found at distance ',dist,' in i,j = ',ix_rec(irec),iy_rec(irec)
print *
enddo
endif
! check the Courant stability condition for the explicit time scheme
! R. Courant et K. O. Friedrichs et H. Lewy (1928)
! Courant number = cp * DELTAT * sqrt(1/dx^2 + 1/dy^2 + 1/dz^2); must not exceed 1
Courant_number = cp * DELTAT * sqrt(1.d0/DELTAX**2 + 1.d0/DELTAY**2 + 1.d0/DELTAZ**2)
if (rank == rank_cut_plane) then
print *,'Courant number is ',Courant_number
print *
endif
! every rank evaluates the same condition, so all processes stop together
if (Courant_number > 1.d0) stop 'time step is too large, simulation will be unstable'
! erase main arrays
! all three velocity components and all six independent stress components
! must be zeroed exactly once before time stepping, because the update scheme
! accumulates into them (e.g. "... + sigmaxx(i,j,k)")
! (fix: the original zeroed sigmazz twice and never zeroed sigmaxx,
! leaving sigmaxx holding uninitialized memory)
vx(:,:,:) = ZERO
vy(:,:,:) = ZERO
vz(:,:,:) = ZERO
sigmaxx(:,:,:) = ZERO
sigmayy(:,:,:) = ZERO
sigmazz(:,:,:) = ZERO
sigmaxy(:,:,:) = ZERO
sigmaxz(:,:,:) = ZERO
sigmayz(:,:,:) = ZERO
! PML
! erase the CPML memory (recursive-convolution) variables
memory_dvx_dx(:,:,:) = ZERO
memory_dvx_dy(:,:,:) = ZERO
memory_dvx_dz(:,:,:) = ZERO
memory_dvy_dx(:,:,:) = ZERO
memory_dvy_dy(:,:,:) = ZERO
memory_dvy_dz(:,:,:) = ZERO
memory_dvz_dx(:,:,:) = ZERO
memory_dvz_dy(:,:,:) = ZERO
memory_dvz_dz(:,:,:) = ZERO
memory_dsigmaxx_dx(:,:,:) = ZERO
memory_dsigmayy_dy(:,:,:) = ZERO
memory_dsigmazz_dz(:,:,:) = ZERO
memory_dsigmaxy_dx(:,:,:) = ZERO
memory_dsigmaxy_dy(:,:,:) = ZERO
memory_dsigmaxz_dx(:,:,:) = ZERO
memory_dsigmaxz_dz(:,:,:) = ZERO
memory_dsigmayz_dy(:,:,:) = ZERO
memory_dsigmayz_dz(:,:,:) = ZERO
! erase seismograms
sisvx(:,:) = ZERO
sisvy(:,:) = ZERO
! initialize total energy
total_energy(:) = ZERO
! record the wall-clock start time of the simulation
call date_and_time(datein,timein,zone,time_values)
! time_values(3): day of the month
! time_values(5): hour of the day
! time_values(6): minutes of the hour
! time_values(7): seconds of the minute
! time_values(8): milliseconds of the second
! this fails if we cross the end of the month
time_start = 86400.d0*time_values(3) + 3600.d0*time_values(5) + &
60.d0*time_values(6) + time_values(7) + time_values(8) / 1000.d0
!---
! set up the MPI halo-exchange partners for the two shift directions;
! MPI_PROC_NULL makes the corresponding send/receive a no-op at the mesh edges
! we receive from the process on the left, and send to the process on the right
sender_right_shift = rank - 1
receiver_right_shift = rank + 1
! if we are the first process, there is no neighbor on the left
if (rank == 0) sender_right_shift = MPI_PROC_NULL
! if we are the last process, there is no neighbor on the right
if (rank == nb_procs - 1) receiver_right_shift = MPI_PROC_NULL
!---
! we receive from the process on the right, and send to the process on the left
sender_left_shift = rank + 1
receiver_left_shift = rank - 1
! if we are the first process, there is no neighbor on the left
if (rank == 0) receiver_left_shift = MPI_PROC_NULL
! if we are the last process, there is no neighbor on the right
if (rank == nb_procs - 1) sender_left_shift = MPI_PROC_NULL
! local k loop bounds: shrink by one at the two global ends of the mesh,
! where there is no neighbor slice to provide the k-1 (resp. k+1) plane
k2begin = 1
if (rank == 0) k2begin = 2
kminus1end = NZ_LOCAL
if (rank == nb_procs - 1) kminus1end = NZ_LOCAL - 1
!---
!--- beginning of time loop
!---
do it = 1,NSTEP
if (rank == rank_cut_plane) print *,'it = ',it
!----------------------
! compute stress sigma
!----------------------
! halo exchange of the velocity components before the Z differences below.
! "left shift": send the local plane k=1 down to rank-1 and receive the
! neighbor's plane k=1 from rank+1 into the ghost plane k=NZ_LOCAL+1
! (needed by the forward differences vx(k+1) and vy(k+1)).
! "right shift": send the local plane k=NZ_LOCAL up to rank+1 and receive
! the neighbor's top plane from rank-1 into the ghost plane k=0
! (needed by the backward difference vz(k-1)).
! on boundary ranks the partner is MPI_PROC_NULL, making the call a no-op.
! vx(k+1), left shift
call MPI_SENDRECV(vx(:,:,1),number_of_values,MPI_DOUBLE_PRECISION, &
receiver_left_shift,message_tag,vx(:,:,NZ_LOCAL+1),number_of_values, &
MPI_DOUBLE_PRECISION,sender_left_shift,message_tag,MPI_COMM_WORLD,message_status,code)
! vy(k+1), left shift
call MPI_SENDRECV(vy(:,:,1),number_of_values,MPI_DOUBLE_PRECISION, &
receiver_left_shift,message_tag,vy(:,:,NZ_LOCAL+1),number_of_values, &
MPI_DOUBLE_PRECISION,sender_left_shift,message_tag,MPI_COMM_WORLD,message_status,code)
! vz(k-1), right shift
call MPI_SENDRECV(vz(:,:,NZ_LOCAL),number_of_values,MPI_DOUBLE_PRECISION, &
receiver_right_shift,message_tag,vz(:,:,0),number_of_values, &
MPI_DOUBLE_PRECISION,sender_right_shift,message_tag,MPI_COMM_WORLD,message_status,code)
! update the three diagonal stress components from the velocity field.
! each spatial derivative is passed through its CPML memory variable and
! divided by the matching K_* grid-stretching factor before entering
! Hooke's law.
! NOTE(review): the PRIVATE list appears to name every loop-local scalar
! used by any of the kernels, not only those used in this loop nest.
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(kglobal,i,j,k,value_dvx_dx,value_dvx_dy, &
!$OMP value_dvx_dz,value_dvy_dx,value_dvy_dy,value_dvy_dz,value_dvz_dx,value_dvz_dy, &
!$OMP value_dvz_dz,value_dsigmaxx_dx,value_dsigmayy_dy,value_dsigmazz_dz, &
!$OMP value_dsigmaxy_dx,value_dsigmaxy_dy,value_dsigmaxz_dx,value_dsigmaxz_dz, &
!$OMP value_dsigmayz_dy,value_dsigmayz_dz) SHARED(vx,vy,vz,sigmaxx,sigmayy,sigmazz, &
!$OMP sigmaxy,sigmaxz,sigmayz,memory_dvx_dx,memory_dvx_dy,memory_dvx_dz, &
!$OMP memory_dvy_dx,memory_dvy_dy,memory_dvy_dz,memory_dvz_dx,memory_dvz_dy, &
!$OMP memory_dvz_dz,memory_dsigmaxx_dx,memory_dsigmayy_dy,memory_dsigmazz_dz, &
!$OMP memory_dsigmaxy_dx,memory_dsigmaxy_dy,memory_dsigmaxz_dx,memory_dsigmaxz_dz, &
!$OMP memory_dsigmayz_dy,memory_dsigmayz_dz,a_x,b_x,K_x,a_x_half,b_x_half,K_x_half, &
!$OMP a_y,b_y,K_y,a_y_half,b_y_half,K_y_half,a_z,b_z,K_z,a_z_half,b_z_half,K_z_half,k2begin,offset_k)
! start at k2begin (= 2 on rank 0) so the backward difference vz(k-1)
! stays inside the grid on the rank owning the global bottom plane
do k=k2begin,NZ_LOCAL
! the CPML damping profiles a_z/b_z/K_z are indexed on the global
! (undecomposed) Z axis
kglobal = k + offset_k
do j=2,NY
do i=1,NX-1
! staggered-grid first differences of the velocity components
value_dvx_dx = (vx(i+1,j,k)-vx(i,j,k)) * ONE_OVER_DELTAX
value_dvy_dy = (vy(i,j,k)-vy(i,j-1,k)) * ONE_OVER_DELTAY
value_dvz_dz = (vz(i,j,k)-vz(i,j,k-1)) * ONE_OVER_DELTAZ
! advance the CPML memory variables (recursive convolution in time)
memory_dvx_dx(i,j,k) = b_x_half(i) * memory_dvx_dx(i,j,k) + a_x_half(i) * value_dvx_dx
memory_dvy_dy(i,j,k) = b_y(j) * memory_dvy_dy(i,j,k) + a_y(j) * value_dvy_dy
memory_dvz_dz(i,j,k) = b_z(kglobal) * memory_dvz_dz(i,j,k) + a_z(kglobal) * value_dvz_dz
! damped derivative = stretched derivative + memory contribution
value_dvx_dx = value_dvx_dx / K_x_half(i) + memory_dvx_dx(i,j,k)
value_dvy_dy = value_dvy_dy / K_y(j) + memory_dvy_dy(i,j,k)
value_dvz_dz = value_dvz_dz / K_z(kglobal) + memory_dvz_dz(i,j,k)
! isotropic Hooke's law, integrated over one time step
! (coefficients are DELTAT*(lambda+2mu) and DELTAT*lambda)
sigmaxx(i,j,k) = DELTAT_lambdaplus2mu*value_dvx_dx + &
DELTAT_lambda*(value_dvy_dy + value_dvz_dz) + sigmaxx(i,j,k)
sigmayy(i,j,k) = DELTAT_lambda*(value_dvx_dx + value_dvz_dz) + &
DELTAT_lambdaplus2mu*value_dvy_dy + sigmayy(i,j,k)
sigmazz(i,j,k) = DELTAT_lambda*(value_dvx_dx + value_dvy_dy) + DELTAT_lambdaplus2mu*value_dvz_dz + sigmazz(i,j,k)
enddo
enddo
enddo
!$OMP END PARALLEL DO
! update the shear stress sigma_xy; the k loop covers the whole local
! slice because no derivative in Z is involved, so no halo data or
! global-k CPML profile is needed here.
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(kglobal,i,j,k,value_dvx_dx,value_dvx_dy, &
!$OMP value_dvx_dz,value_dvy_dx,value_dvy_dy,value_dvy_dz,value_dvz_dx,value_dvz_dy, &
!$OMP value_dvz_dz,value_dsigmaxx_dx,value_dsigmayy_dy,value_dsigmazz_dz, &
!$OMP value_dsigmaxy_dx,value_dsigmaxy_dy,value_dsigmaxz_dx,value_dsigmaxz_dz, &
!$OMP value_dsigmayz_dy,value_dsigmayz_dz) SHARED(vx,vy,vz,sigmaxx,sigmayy,sigmazz, &
!$OMP sigmaxy,sigmaxz,sigmayz,memory_dvx_dx,memory_dvx_dy,memory_dvx_dz, &
!$OMP memory_dvy_dx,memory_dvy_dy,memory_dvy_dz,memory_dvz_dx,memory_dvz_dy, &
!$OMP memory_dvz_dz,memory_dsigmaxx_dx,memory_dsigmayy_dy,memory_dsigmazz_dz, &
!$OMP memory_dsigmaxy_dx,memory_dsigmaxy_dy,memory_dsigmaxz_dx,memory_dsigmaxz_dz, &
!$OMP memory_dsigmayz_dy,memory_dsigmayz_dz,a_x,b_x,K_x,a_x_half,b_x_half,K_x_half, &
!$OMP a_y,b_y,K_y,a_y_half,b_y_half,K_y_half,a_z,b_z,K_z,a_z_half,b_z_half,K_z_half)
do k=1,NZ_LOCAL
do j=1,NY-1
do i=2,NX
! staggered first differences: vy along x, vx along y
value_dvy_dx = (vy(i,j,k)-vy(i-1,j,k)) * ONE_OVER_DELTAX
value_dvx_dy = (vx(i,j+1,k)-vx(i,j,k)) * ONE_OVER_DELTAY
! advance the CPML memory variables
memory_dvy_dx(i,j,k) = b_x(i) * memory_dvy_dx(i,j,k) + a_x(i) * value_dvy_dx
memory_dvx_dy(i,j,k) = b_y_half(j) * memory_dvx_dy(i,j,k) + a_y_half(j) * value_dvx_dy
! damped derivatives with grid stretching
value_dvy_dx = value_dvy_dx / K_x(i) + memory_dvy_dx(i,j,k)
value_dvx_dy = value_dvx_dy / K_y_half(j) + memory_dvx_dy(i,j,k)
! shear stress update, coefficient DELTAT*mu
sigmaxy(i,j,k) = DELTAT_mu*(value_dvy_dx + value_dvx_dy) + sigmaxy(i,j,k)
enddo
enddo
enddo
!$OMP END PARALLEL DO
! update the shear stresses sigma_xz and sigma_yz. both need the forward
! differences vx(k+1) / vy(k+1), so the k loop stops at kminus1end
! (= NZ_LOCAL-1 on the last rank) and, at k=NZ_LOCAL on interior ranks,
! reads the ghost plane NZ_LOCAL+1 filled by the halo exchange above.
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(kglobal,i,j,k,value_dvx_dx,value_dvx_dy, &
!$OMP value_dvx_dz,value_dvy_dx,value_dvy_dy,value_dvy_dz,value_dvz_dx,value_dvz_dy, &
!$OMP value_dvz_dz,value_dsigmaxx_dx,value_dsigmayy_dy,value_dsigmazz_dz, &
!$OMP value_dsigmaxy_dx,value_dsigmaxy_dy,value_dsigmaxz_dx,value_dsigmaxz_dz, &
!$OMP value_dsigmayz_dy,value_dsigmayz_dz) SHARED(vx,vy,vz,sigmaxx,sigmayy,sigmazz, &
!$OMP sigmaxy,sigmaxz,sigmayz,memory_dvx_dx,memory_dvx_dy,memory_dvx_dz, &
!$OMP memory_dvy_dx,memory_dvy_dy,memory_dvy_dz,memory_dvz_dx,memory_dvz_dy, &
!$OMP memory_dvz_dz,memory_dsigmaxx_dx,memory_dsigmayy_dy,memory_dsigmazz_dz, &
!$OMP memory_dsigmaxy_dx,memory_dsigmaxy_dy,memory_dsigmaxz_dx,memory_dsigmaxz_dz, &
!$OMP memory_dsigmayz_dy,memory_dsigmayz_dz,a_x,b_x,K_x,a_x_half,b_x_half,K_x_half, &
!$OMP a_y,b_y,K_y,a_y_half,b_y_half,K_y_half,a_z,b_z,K_z,a_z_half,b_z_half,K_z_half,kminus1end,offset_k)
do k=1,kminus1end
! global k index for the half-grid CPML profiles a_z_half/b_z_half/K_z_half
kglobal = k + offset_k
! --- sigma_xz: d(vz)/dx and d(vx)/dz ---
do j=1,NY
do i=2,NX
value_dvz_dx = (vz(i,j,k)-vz(i-1,j,k)) * ONE_OVER_DELTAX
value_dvx_dz = (vx(i,j,k+1)-vx(i,j,k)) * ONE_OVER_DELTAZ
! advance the CPML memory variables
memory_dvz_dx(i,j,k) = b_x(i) * memory_dvz_dx(i,j,k) + a_x(i) * value_dvz_dx
memory_dvx_dz(i,j,k) = b_z_half(kglobal) * memory_dvx_dz(i,j,k) + a_z_half(kglobal) * value_dvx_dz
! damped derivatives with grid stretching
value_dvz_dx = value_dvz_dx / K_x(i) + memory_dvz_dx(i,j,k)
value_dvx_dz = value_dvx_dz / K_z_half(kglobal) + memory_dvx_dz(i,j,k)
sigmaxz(i,j,k) = DELTAT_mu*(value_dvz_dx + value_dvx_dz) + sigmaxz(i,j,k)
enddo
enddo
! --- sigma_yz: d(vz)/dy and d(vy)/dz ---
do j=1,NY-1
do i=1,NX
value_dvz_dy = (vz(i,j+1,k)-vz(i,j,k)) * ONE_OVER_DELTAY
value_dvy_dz = (vy(i,j,k+1)-vy(i,j,k)) * ONE_OVER_DELTAZ
! advance the CPML memory variables
memory_dvz_dy(i,j,k) = b_y_half(j) * memory_dvz_dy(i,j,k) + a_y_half(j) * value_dvz_dy
memory_dvy_dz(i,j,k) = b_z_half(kglobal) * memory_dvy_dz(i,j,k) + a_z_half(kglobal) * value_dvy_dz
! damped derivatives with grid stretching
value_dvz_dy = value_dvz_dy / K_y_half(j) + memory_dvz_dy(i,j,k)
value_dvy_dz = value_dvy_dz / K_z_half(kglobal) + memory_dvy_dz(i,j,k)
sigmayz(i,j,k) = DELTAT_mu*(value_dvz_dy + value_dvy_dz) + sigmayz(i,j,k)
enddo
enddo
enddo
!$OMP END PARALLEL DO
!------------------
! compute velocity
!------------------
! halo exchange of the stress components before the Z differences of the
! velocity update:
!  - sigmazz: send local plane k=1 down to rank-1 and receive the
!    neighbor's plane k=1 from rank+1 into ghost plane NZ_LOCAL+1
!    (left shift, needed by a forward difference sigmazz(k+1))
!  - sigmayz and sigmaxz: send local plane k=NZ_LOCAL up to rank+1 and
!    receive the neighbor's top plane from rank-1 into ghost plane k=0
!    (right shift, needed by the backward differences in k-1)
! boundary ranks exchange with MPI_PROC_NULL, so the call is a no-op there.
! sigmazz(k+1), left shift
call MPI_SENDRECV(sigmazz(:,:,1),number_of_values,MPI_DOUBLE_PRECISION, &
receiver_left_shift,message_tag,sigmazz(:,:,NZ_LOCAL+1),number_of_values, &
MPI_DOUBLE_PRECISION,sender_left_shift,message_tag,MPI_COMM_WORLD,message_status,code)
! sigmayz(k-1), right shift
call MPI_SENDRECV(sigmayz(:,:,NZ_LOCAL),number_of_values,MPI_DOUBLE_PRECISION, &
receiver_right_shift,message_tag,sigmayz(:,:,0),number_of_values, &
MPI_DOUBLE_PRECISION,sender_right_shift,message_tag,MPI_COMM_WORLD,message_status,code)
! sigmaxz(k-1), right shift
call MPI_SENDRECV(sigmaxz(:,:,NZ_LOCAL),number_of_values,MPI_DOUBLE_PRECISION, &
receiver_right_shift,message_tag,sigmaxz(:,:,0),number_of_values, &
MPI_DOUBLE_PRECISION,sender_right_shift,message_tag,MPI_COMM_WORLD,message_status,code)
!$OMP PARALLEL DO DEFAULT(NONE) PRIVATE(kglobal,i,j,k,value_dvx_dx,value_dvx_dy, &
!$OMP value_dvx_dz,value_dvy_dx,value_dvy_dy,value_dvy_dz,value_dvz_dx,value_dvz_dy, &
!$OMP value_dvz_dz,value_dsigmaxx_dx,value_dsigmayy_dy,value_dsigmazz_dz, &
!$OMP value_dsigmaxy_dx,value_dsigmaxy_dy,value_dsigmaxz_dx,value_dsigmaxz_dz, &
!$OMP value_dsigmayz_dy,value_dsigmayz_dz) SHARED(vx,vy,vz,sigmaxx,sigmayy,sigmazz, &
!$OMP sigmaxy,sigmaxz,sigmayz,memory_dvx_dx,memory_dvx_dy,memory_dvx_dz, &
!$OMP memory_dvy_dx,memory_dvy_dy,memory_dvy_dz,memory_dvz_dx,memory_dvz_dy, &
!$OMP memory_dvz_dz,memory_dsigmaxx_dx,memory_dsigmayy_dy,memory_dsigmazz_dz, &
!$OMP memory_dsigmaxy_dx,memory_dsigmaxy_dy,memory_dsigmaxz_dx,memory_dsigmaxz_dz, &
!$OMP memory_dsigmayz_dy,memory_dsigmayz_dz,a_x,b_x,K_x,a_x_half,b_x_half,K_x_half, &
!$OMP a_y,b_y,K_y,a_y_half,b_y_half,K_y_half,a_z,b_z,K_z,a_z_half,b_z_half,K_z_half,k2begin,offset_k)
do k=k2begin,NZ_LOCAL
kglobal = k + offset_k
do j=2,NY
do i=2,NX
value_dsigmaxx_dx = (sigmaxx(i,j,k)-sigmaxx(i-1,j,k)) * ONE_OVER_DELTAX
value_dsigmaxy_dy = (sigmaxy(i,j,k)-sigmaxy(i,j-1,k)) * ONE_OVER_DELTAY
value_dsigmaxz_dz = (sigmaxz(i,j,k)-sigmaxz(i,j,k-1)) * ONE_OVER_DELTAZ
memory_dsigmaxx_dx(i,j,k) = b_x(i) * memory_dsigmaxx_dx(i,j,k) + a_x(i) * value_dsigmaxx_dx
memory_dsigmaxy_dy(i,j,k) = b_y(j) * memory_dsigmaxy_dy(i,j,k) + a_y(j) * value_dsigmaxy_dy
memory_dsigmaxz_dz(i,j,k) = b_z(kglobal) * memory_dsigmaxz_dz(i,j,k) + a_z(kglobal) * value_dsigmaxz_dz
value_dsigmaxx_dx = value_dsigmaxx_dx / K_x(i) + memory_dsigmaxx_dx(i,j,k)
value_dsigmaxy_dy = value_dsigmaxy_dy / K_y(j) + memory_dsigmaxy_dy(i,j,k)
value_dsigmaxz_dz = value_dsigmaxz_dz / K_z(kglobal) + memory_dsigmaxz_dz(i,j,k)
vx(i,j,k) = DELTAT_over_rho*(value_dsigmaxx_dx + value_dsigmaxy_dy + value_dsigmaxz_dz) + vx(i,j,k)
enddo
enddo
do j=1,NY-1
do i=1,NX-1