Expose inter-/histopolation matrices in GlobalProjector, add unit tests #347

Open

wants to merge 29 commits into devel from expose-inter-histopolation-matrices
Changes from 25 commits

Commits (29)
ba29e90
Expose inter-/histopolation matrices in GlobalProjector, add unit tests
camillo81 Oct 12, 2023
60592c9
Set atol=1e-5 in tests
camillo81 Oct 12, 2023
47d68c6
Merge branch 'devel' into expose-inter-histopolation-matrices
camillo81 Oct 12, 2023
b59195a
Add forgotten atol, set pyccel backend for StencilMatrix
camillo81 Oct 13, 2023
803384a
Remove 3d unit tests (take too long), remove backend option (only 1d …
camillo81 Oct 13, 2023
5fd7f65
Merge branch 'devel' into expose-inter-histopolation-matrices
camillo81 Oct 26, 2023
8f7e477
Address comments
camillo81 Nov 3, 2023
c290867
Merge branch 'devel' into expose-inter-histopolation-matrices
yguclu Nov 21, 2023
489130f
Run the 2d commuting projectors tests in parallel.
camillo81 Dec 14, 2023
39af2a0
Merge branch 'devel' into expose-inter-histopolation-matrices
yguclu Aug 22, 2024
9516a40
Print pytest's detailed summary report
yguclu Aug 26, 2024
a6ce393
Update psydac/feec/tests/test_commuting_projections.py
spossann Sep 13, 2024
214079d
Fixes in `test_3d_commuting_pro_1` (#438)
max-models Sep 18, 2024
245a45e
Updated GlobalProjector so the m>1 is handled correctly
max-models Sep 19, 2024
e66f863
Fixed bug, replaced imat with hmat
max-models Sep 20, 2024
cd8e480
Fixed indexing in stencil2coo_1d_C and stencil2coo_1d_F for shifts > …
max-models Sep 23, 2024
28e0f89
Improve and test Pyccel compiler flags (#428)
yguclu Sep 6, 2024
dd26dd5
Update calls to `numpy.reshape` to avoid deprecation warnings (#433)
FrederikSchnack Sep 18, 2024
a3df57d
Circumvent `apt-get` download error (#440)
yguclu Sep 19, 2024
188f2d2
Mention C support in near future in README (#441)
yguclu Sep 19, 2024
c617b15
Remove `is_block` property from `VectorFemSpace` (#439)
max-models Sep 25, 2024
6fc53bb
Update `stencil2coo` kernels for shifts > 1 (#442)
max-models Sep 27, 2024
2dc2297
Add `sqrt` option to `diagonal` method of stencil/block matrices (#434)
max-models Sep 27, 2024
08b003d
Merge remote-tracking branch 'origin/devel' into expose-inter-histopo…
max-models Oct 1, 2024
6783d3f
reverted stencil2coo_kernels.py to devel version
max-models Oct 1, 2024
f75a333
Updated the checks in test_commuting_projectors.py.
max-models Oct 9, 2024
549777d
Removed TODO from test_3d_commuting_pro_2
max-models Oct 16, 2024
8c2c79a
Removed TODO from test_3d_commuting_pro_3
max-models Oct 16, 2024
f5930d1
Use the devel version of continuous-integration.yml
max-models Oct 23, 2024
10 changes: 5 additions & 5 deletions .github/workflows/continuous-integration.yml
@@ -174,28 +174,28 @@ jobs:
run: |
export PSYDAC_MESH_DIR=$GITHUB_WORKSPACE/mesh
export OMP_NUM_THREADS=2
- python -m pytest -n auto --pyargs psydac -m "not parallel and not petsc"
+ python -m pytest -n auto --pyargs psydac -m "not parallel and not petsc" -ra

- name: Run MPI tests with Pytest
working-directory: ./pytest
run: |
export PSYDAC_MESH_DIR=$GITHUB_WORKSPACE/mesh
export OMP_NUM_THREADS=2
- python mpi_tester.py --mpirun="mpiexec -n 4 ${MPI_OPTS}" --pyargs psydac -m "parallel and not petsc"
+ python mpi_tester.py --mpirun="mpiexec -n 4 ${MPI_OPTS}" --pyargs psydac -m "parallel and not petsc" -ra

- name: Run single-process PETSc tests with Pytest
working-directory: ./pytest
run: |
export PSYDAC_MESH_DIR=$GITHUB_WORKSPACE/mesh
export OMP_NUM_THREADS=2
- python -m pytest -n auto --pyargs psydac -m "not parallel and petsc"
+ python -m pytest -n auto --pyargs psydac -m "not parallel and petsc" -ra

- name: Run MPI PETSc tests with Pytest
- working-directory: ./pytest
+ working-directory: ./
run: |
export PSYDAC_MESH_DIR=$GITHUB_WORKSPACE/mesh
export OMP_NUM_THREADS=2
- python mpi_tester.py --mpirun="mpiexec -n 4 ${MPI_OPTS}" --pyargs psydac -m "parallel and petsc"
+ python mpi_tester.py --mpirun="mpiexec -n 4 ${MPI_OPTS}" --pyargs psydac -m "parallel and petsc" -ra

- name: Remove test directory
if: ${{ always() }}
82 changes: 78 additions & 4 deletions psydac/feec/global_projectors.py
@@ -2,16 +2,17 @@

import numpy as np

- from psydac.linalg.kron import KroneckerLinearSolver
- from psydac.linalg.stencil import StencilVector
- from psydac.linalg.block import BlockLinearOperator, BlockVector
+ from psydac.linalg.kron import KroneckerLinearSolver, KroneckerStencilMatrix
+ from psydac.linalg.stencil import StencilMatrix, StencilVectorSpace
+ from psydac.linalg.block import BlockLinearOperator
from psydac.core.bsplines import quadrature_grid
from psydac.utilities.quadratures import gauss_legendre
from psydac.fem.basic import FemField

from psydac.fem.tensor import TensorFemSpace
from psydac.fem.vector import VectorFemSpace

from psydac.ddm.cart import DomainDecomposition, CartDecomposition
from psydac.utilities.utils import roll_edges

from abc import ABCMeta, abstractmethod
@@ -120,18 +121,34 @@ def __init__(self, space, nquads = None):
self._grid_x = []
self._grid_w = []
solverblocks = []
matrixblocks = []
blocks = [] # for BlockLinearOperator
for i,block in enumerate(structure):
# do for each block (i.e. each TensorFemSpace):

block_x = []
block_w = []
solvercells = []
matrixcells = []
blocks += [[]]
for j, cell in enumerate(block):
# for each direction in the tensor space (i.e. each SplineSpace):

V = tensorspaces[i].spaces[j]
s = tensorspaces[i].vector_space.starts[j]
e = tensorspaces[i].vector_space.ends[j]
p = tensorspaces[i].vector_space.pads[j]
n = tensorspaces[i].vector_space.npts[j]
m = tensorspaces[i].multiplicity[j]
periodic = tensorspaces[i].vector_space.periods[j]
ncells = tensorspaces[i].ncells[j]
blocks[-1] += [None] # fill blocks with None, fill the diagonals later

# create a distributed matrix for the current 1d SplineSpace
domain_decomp = DomainDecomposition([ncells], [periodic])
cart_decomp = CartDecomposition(domain_decomp, [n], [[s]], [[e]], [p], [m])
V_cart = StencilVectorSpace(cart_decomp)
M = StencilMatrix(V_cart, V_cart)

if cell == 'I':
# interpolation case
@@ -143,6 +160,23 @@
local_x = local_intp_x[:, np.newaxis]
local_w = np.ones_like(local_x)
solvercells += [V._interpolator]

# make 1D collocation matrix in stencil format
row_indices, col_indices = np.nonzero(V.imat)

for row_i, col_i in zip(row_indices, col_indices):

# only consider row indices on process
if row_i in range(V_cart.starts[0], V_cart.ends[0] + 1):
row_i_loc = row_i - s

M._data[row_i_loc + m*p, (col_i + p - row_i)%V.imat.shape[1]] = V.imat[row_i, col_i]

# check if stencil matrix was built correctly
assert np.allclose(M.toarray()[s:e + 1], V.imat[s:e + 1])
# TODO Fix toarray() for multiplicity m > 1
matrixcells += [M.copy()]

elif cell == 'H':
# histopolation case
if quad_x[j] is None:
@@ -159,11 +193,37 @@

local_x, local_w = quad_x[j], quad_w[j]
solvercells += [V._histopolator]

# make 1D histopolation matrix in stencil format
row_indices, col_indices = np.nonzero(V.hmat)

for row_i, col_i in zip(row_indices, col_indices):

# only consider row indices on process
if row_i in range(V_cart.starts[0], V_cart.ends[0] + 1):
row_i_loc = row_i - s

M._data[row_i_loc + m*p, (col_i + p - row_i)%V.hmat.shape[1]] = V.hmat[row_i, col_i]

# check if stencil matrix was built correctly
assert np.allclose(M.toarray()[s:e + 1], V.hmat[s:e + 1])

matrixcells += [M.copy()]

else:
raise NotImplementedError('Invalid entry in structure array.')

block_x += [local_x]
block_w += [local_w]

# build Kronecker out of single directions
if isinstance(self.space, TensorFemSpace):
matrixblocks += [KroneckerStencilMatrix(self.space.vector_space, self.space.vector_space, *matrixcells)]
else:
matrixblocks += [KroneckerStencilMatrix(self.space.vector_space[i], self.space.vector_space[i], *matrixcells)]

# fill the diagonals for BlockLinearOperator
blocks[i][i] = matrixblocks[-1]

# finish block, build solvers, get dataslice to project to
self._grid_x += [block_x]
@@ -173,6 +233,13 @@

dataslice = tuple(slice(p*m, -p*m) for p, m in zip(tensorspaces[i].vector_space.pads,tensorspaces[i].vector_space.shifts))
dofs[i] = rhsblocks[i]._data[dataslice]

# build final Inter-/Histopolation matrix (distributed)
if isinstance(self.space, TensorFemSpace):
self._imat_kronecker = matrixblocks[0]
else:
self._imat_kronecker = BlockLinearOperator(self.space.vector_space, self.space.vector_space,
blocks=blocks)

# finish arguments and create a lambda
args = (*intp_x, *quad_x, *quad_w, *dofs)
@@ -238,6 +305,13 @@ def solver(self):
"""
return self._solver

@property
def imat_kronecker(self):
"""
Inter-/Histopolation matrix in distributed format.
"""
return self._imat_kronecker

@abstractmethod
def _structure(self, dim):
"""
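A side note on the stencil assembly in global_projectors.py above: the index arithmetic M._data[row_i - s + m*p, (col_i + p - row_i) % n] packs each entry of the (possibly periodic) banded inter-/histopolation matrix into the local stencil storage, with the modulo handling the wrap-around of periodic splines. Below is a self-contained NumPy sketch of the same mapping under simplified assumptions (single process, shift m = 1, start index s = 0); it round-trips a toy banded matrix and is illustrative only, not code from the PR.

import numpy as np

# Toy sizes: n basis functions, stencil pad p, shift m, local start index s.
n, p, m, s = 8, 2, 1, 0

# A periodic banded matrix playing the role of V.imat / V.hmat in the PR.
A = np.zeros((n, n))
for i in range(n):
    for k in range(-p, p + 1):
        A[i, (i + k) % n] = 10 * (i + 1) + k

# Stencil storage in the spirit of StencilMatrix._data: one row per matrix row
# (plus m*p ghost rows on each side), 2*p + 1 diagonals addressed by col - row + p.
data = np.zeros((n + 2 * m * p, 2 * p + 1))
rows, cols = np.nonzero(A)
for r, c in zip(rows, cols):
    data[r - s + m * p, (c + p - r) % n] = A[r, c]   # same indexing as in the PR

# Reconstruct the dense matrix from the stencil data and check the round trip.
B = np.zeros_like(A)
for r in range(n):
    for k in range(-p, p + 1):
        B[r, (r + k) % n] = data[r - s + m * p, k + p]
assert np.allclose(A, B)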