Implement AMR with MPI #361

Merged (30 commits, Dec 16, 2020)
The file changes shown below are from 10 of the 30 commits.

Commits (30)
5c7ca7d
Implement first prototype of refinement with MPI
efaulhaber Nov 30, 2020
ebee6f6
Extract function init_mpi_cache!
efaulhaber Nov 30, 2020
b3bf337
Fix n_elements in save_solution_dg.jl with MPI
efaulhaber Nov 30, 2020
46188cb
Change partition! to keep children together
efaulhaber Nov 30, 2020
76548e8
Implement coarsening with MPI
efaulhaber Nov 30, 2020
d48182c
Incorporate requested changes
efaulhaber Dec 3, 2020
52e1248
Add todo comment
efaulhaber Dec 3, 2020
ca4423f
Merge remote-tracking branch 'upstream/master' into 159-mpi-amr
efaulhaber Dec 6, 2020
ff0cc54
Add docstring for partition!
efaulhaber Dec 10, 2020
3145663
Add tests for parallel AMR
efaulhaber Dec 10, 2020
a92af06
Use correct path for test examples
efaulhaber Dec 10, 2020
72936ad
Merge remote-tracking branch 'upstream/master' into 159-mpi-amr
efaulhaber Dec 11, 2020
b351879
Use Allgatherv! instead of Allgatherv
efaulhaber Dec 11, 2020
9342c62
Implement requested changes
efaulhaber Dec 11, 2020
a2de237
Use MPI.UBuffer with MPI.Allgather!
efaulhaber Dec 11, 2020
4aca4ef
Change deprecated MPI.Gatherv to MPI.Gatherv!
efaulhaber Dec 11, 2020
1f24232
Fix 4aca4ef
efaulhaber Dec 12, 2020
cc10072
Replace deprecated MPI.Gatherv with MPI.Gatherv!
efaulhaber Dec 12, 2020
4025fe9
Use function for solution_variables in examples
efaulhaber Dec 12, 2020
0f820c0
Replace deprecated MPI.Scatterv with MPI.Scatterv!
efaulhaber Dec 12, 2020
f457047
Use different names for the TrixiExtension module
efaulhaber Dec 12, 2020
0438424
Fix 0f820c0
efaulhaber Dec 12, 2020
adc4e3e
Split long line
efaulhaber Dec 13, 2020
982f4cc
Create function nelementsglobal
efaulhaber Dec 13, 2020
d5f1e7d
Use semicolon for optional parameter
efaulhaber Dec 13, 2020
0e34c45
Add assertion for partition!
efaulhaber Dec 13, 2020
4be4a0a
Add unit test for partition! function
efaulhaber Dec 13, 2020
1d0c7c8
Fix 4be4a0a
efaulhaber Dec 13, 2020
1c8d896
Implement requested changes
efaulhaber Dec 14, 2020
356635f
Avoid allocating @assert
sloede Dec 16, 2020
12 changes: 7 additions & 5 deletions examples/2d/elixir_advection_amr_coarsen_once.jl
@@ -4,8 +4,10 @@ using OrdinaryDiffEq
using Trixi


-# define new structs inside a module to allow re-evaluating the file
-module TrixiExtension
+# Define new structs inside a module to allow re-evaluating the file.
+# This module name needs to be unique among all examples, otherwise Julia will throw warnings
+# if multiple test cases using the same module name are run in the same session.
+module TrixiExtensionCoarsen

using Trixi

@@ -34,9 +36,9 @@ function (indicator::IndicatorAlwaysCoarsen)(u::AbstractArray{<:Any,4},
return alpha
end

-end # module TrixiExtension
+end # module TrixiExtensionCoarsen

-import .TrixiExtension
+import .TrixiExtensionCoarsen

###############################################################################
# semidiscretization of the linear advection equation
@@ -83,7 +85,7 @@ save_solution = SaveSolutionCallback(interval=100,
solution_variables=cons2prim)


-amr_controller = ControllerThreeLevel(semi, TrixiExtension.IndicatorAlwaysCoarsen(semi),
+amr_controller = ControllerThreeLevel(semi, TrixiExtensionCoarsen.IndicatorAlwaysCoarsen(semi),
base_level=2, max_level=2,
med_threshold=0.1, max_threshold=0.6)
amr_callback = AMRCallback(semi, amr_controller,
12 changes: 7 additions & 5 deletions examples/2d/elixir_advection_amr_refine_twice.jl
@@ -4,8 +4,10 @@ using OrdinaryDiffEq
using Trixi


-# define new structs inside a module to allow re-evaluating the file
-module TrixiExtension
+# Define new structs inside a module to allow re-evaluating the file.
+# This module name needs to be unique among all examples, otherwise Julia will throw warnings
+# if multiple test cases using the same module name are run in the same session.
+module TrixiExtensionRefine

using Trixi

@@ -34,9 +36,9 @@ function (indicator::IndicatorAlwaysRefine)(u::AbstractArray{<:Any,4},
return alpha
end

-end # module TrixiExtension
+end # module TrixiExtensionRefine

-import .TrixiExtension
+import .TrixiExtensionRefine

###############################################################################
# semidiscretization of the linear advection equation
@@ -83,7 +85,7 @@ save_solution = SaveSolutionCallback(interval=100,
solution_variables=cons2prim)


-amr_controller = ControllerThreeLevel(semi, TrixiExtension.IndicatorAlwaysRefine(semi),
+amr_controller = ControllerThreeLevel(semi, TrixiExtensionRefine.IndicatorAlwaysRefine(semi),
base_level=4, max_level=4,
med_threshold=0.1, max_threshold=0.6)
amr_callback = AMRCallback(semi, amr_controller,
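Both elixirs above make the same rename for the reason stated in the added comment. As a minimal sketch of the failure mode that the unique module names avoid (file names taken from the two examples, assumed to be run from examples/2d/ in a single Julia session):

# If both elixirs still defined `module TrixiExtension`, including them in the
# same session would re-declare that module and Julia would print
#   WARNING: replacing module TrixiExtension.
# With one module name per example (TrixiExtensionCoarsen, TrixiExtensionRefine),
# both files can be included back to back without warnings.
include("elixir_advection_amr_coarsen_once.jl")
include("elixir_advection_amr_refine_twice.jl")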
2 changes: 1 addition & 1 deletion src/callbacks_step/amr.jl
@@ -173,7 +173,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,

if mpi_isparallel()
# Collect lambda for all elements
-lambda_global = Vector{eltype(lambda)}(undef, cache.mpi_cache.n_elements_global)
+lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(dg, cache))
# Use parent because n_elements_by_rank is an OffsetArray
recvbuf = MPI.VBuffer(lambda_global, parent(cache.mpi_cache.n_elements_by_rank))
MPI.Allgatherv!(lambda, recvbuf, mpi_comm())
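The hunk above gathers the AMR indicator values (lambda) from every rank into one global vector. For readers less familiar with MPI.jl's buffer types, here is a hedged, Trixi-independent sketch of the same Allgatherv!/VBuffer pattern; all names below are made up, and it assumes an MPI.jl version that provides VBuffer, as used in this PR.

using MPI

MPI.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)
nranks = MPI.Comm_size(comm)

# Hypothetical uneven distribution: rank 0 owns 3 values, every other rank owns 2
counts = [r == 0 ? 3 : 2 for r in 0:(nranks - 1)]
local_values = fill(Float64(rank), counts[rank + 1])

# The VBuffer tells Allgatherv! how the receive buffer is split across ranks
global_values = Vector{Float64}(undef, sum(counts))
recvbuf = MPI.VBuffer(global_values, counts)
MPI.Allgatherv!(local_values, recvbuf, comm)

# Every rank now holds the same global_values vector, ordered by rank, which is
# how the indicator values become globally available for the AMR decision.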
5 changes: 3 additions & 2 deletions src/callbacks_step/save_restart_dg.jl
@@ -148,7 +148,7 @@ function load_restart_file(mesh::ParallelTreeMesh, equations, dg::DG, cache, res
if !mpi_isroot()
# Receive nodal data from root
for v in eachvariable(equations)
-u[v, .., :] = MPI.Scatterv(eltype(u)[], node_counts, mpi_root(), mpi_comm())
+MPI.Scatterv!(nothing, @view(u[v, .., :]), mpi_root(), mpi_comm())
end

return u_ode
@@ -180,7 +180,8 @@ function load_restart_file(mesh::ParallelTreeMesh, equations, dg::DG, cache, res

# Read variable
println("Reading variables_$v ($name)...")
-u[v, .., :] = MPI.Scatterv(read(file["variables_$v"]), node_counts, mpi_root(), mpi_comm())
+sendbuf = MPI.VBuffer(read(file["variables_$v"]), node_counts)
+MPI.Scatterv!(sendbuf, @view(u[v, .., :]), mpi_root(), mpi_comm())
end
end

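The restart loading now follows the root/non-root split required by MPI.Scatterv!: the root wraps the full dataset in a VBuffer, all other ranks pass nothing and only supply a receive buffer. A hedged standalone sketch with hypothetical counts and data follows; the positional root argument mirrors the MPI.jl calls in this PR, while newer MPI.jl releases take root as a keyword.

using MPI

MPI.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)
nranks = MPI.Comm_size(comm)
root = 0

# Hypothetical per-rank counts, e.g. number of solution values owned by each rank
counts = fill(4, nranks)
recvbuf = Vector{Float64}(undef, counts[rank + 1])

if rank == root
    # Only the root holds the full data set and wraps it in a VBuffer
    sendbuf = MPI.VBuffer(collect(1.0:sum(counts)), counts)
    MPI.Scatterv!(sendbuf, recvbuf, root, comm)
else
    # Non-root ranks pass `nothing` and just receive their own chunk
    MPI.Scatterv!(nothing, recvbuf, root, comm)
end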
2 changes: 1 addition & 1 deletion src/callbacks_step/save_solution_dg.jl
@@ -122,7 +122,7 @@ function save_solution_file(u, time, dt, timestep,
attributes(file)["equations"] = get_name(equations)
attributes(file)["polydeg"] = polydeg(dg)
attributes(file)["n_vars"] = n_vars
attributes(file)["n_elements"] = cache.mpi_cache.n_elements_global
attributes(file)["n_elements"] = nelementsglobal(dg, cache)
attributes(file)["mesh_file"] = splitdir(mesh.current_filename)[2]
attributes(file)["time"] = convert(Float64, time) # Ensure that `time` is written as a double precision scalar
attributes(file)["dt"] = convert(Float64, dt) # Ensure that `dt` is written as a double precision scalar
6 changes: 4 additions & 2 deletions src/mesh/parallel.jl
@@ -6,10 +6,10 @@ based on leaf cell count and tree structure.
If `allow_coarsening` is `true`, the algorithm will keep leaf cells together
on one rank when needed for local coarsening (i.e. when all children of a cell are leaves).
"""
-function partition!(mesh::ParallelTreeMesh, allow_coarsening=true)
+function partition!(mesh::ParallelTreeMesh; allow_coarsening=true)
# Determine number of leaf cells per rank
leaves = leaf_cells(mesh.tree)
-@assert length(leaves) > mpi_nranks()
+@assert length(leaves) > mpi_nranks() "Too many ranks to properly partition the mesh!"
n_leaves_per_rank = OffsetArray(fill(div(length(leaves), mpi_nranks()), mpi_nranks()),
0:(mpi_nranks() - 1))
for d in 0:(rem(length(leaves), mpi_nranks()) - 1)
@@ -49,6 +49,8 @@ function partition!(mesh::ParallelTreeMesh, allow_coarsening=true)
end
end

@assert all(n_leaves_per_rank .> 0) "Too many ranks to properly partition the mesh!"

mesh.n_cells_by_rank[d] = last_id - mesh.first_cell_by_rank[d] + 1
mesh.tree.mpi_ranks[mesh.first_cell_by_rank[d]:last_id] .= d

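The partition! diff above only touches the interface (keyword argument, assertion messages); the distribution it starts from is a plain div/rem split of the leaf cells over the ranks, which the allow_coarsening=true path then adjusts so that all children of a refined cell stay on one rank. A hedged sketch of that first step, using a hypothetical helper name:

# Each rank gets div(n_leaves, n_ranks) leaves; the first rem(n_leaves, n_ranks)
# ranks get one extra leaf. This mirrors the OffsetArray-based setup in partition!
# before any regrouping for coarsening.
function leaves_per_rank(n_leaves, n_ranks)
    counts = fill(div(n_leaves, n_ranks), n_ranks)
    for r in 1:rem(n_leaves, n_ranks)
        counts[r] += 1
    end
    return counts
end

leaves_per_rank(16, 2)  # [8, 8]
leaves_per_rank(16, 3)  # [6, 5, 5]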
1 change: 1 addition & 0 deletions src/solvers/dg/dg.jl
@@ -161,6 +161,7 @@

@inline nnodes(dg::DG) = nnodes(dg.basis)
@inline nelements(dg::DG, cache) = nelements(cache.elements)
@inline nelementsglobal(dg::DG, cache) = mpi_isparallel() ? cache.mpi_cache.n_elements_global : nelements(dg, cache)
@inline ninterfaces(dg::DG, cache) = ninterfaces(cache.interfaces)
@inline nboundaries(dg::DG, cache) = nboundaries(cache.boundaries)
@inline nmortars(dg::DG, cache) = nmortars(cache.mortars)
1 change: 1 addition & 0 deletions test/Project.toml
@@ -5,6 +5,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SimpleMock = "a896ed2c-15a5-4479-b61d-a0e88e2a1d25"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[compat]
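SimpleMock becomes a test dependency because the new partition! unit tests (next file) need Trixi.mpi_nranks() to report several ranks without launching an actual MPI run. A hedged sketch of the mocking pattern with made-up functions, mirroring the fn => replacement form used in the tests below:

using SimpleMock

double(x) = 2x

mock(double => (x -> 10x)) do _
    @assert double(3) == 30  # inside the block, the replacement is called
end
@assert double(3) == 6       # outside the block, the original behavior is back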
111 changes: 111 additions & 0 deletions test/test_manual.jl
@@ -1,6 +1,7 @@
module TestManual

using Test
using SimpleMock
using Documenter
using Trixi

@@ -59,6 +60,116 @@ isdir(outdir) && rm(outdir, recursive=true)
end
end

@testset "ParallelTreeMesh" begin
@testset "partition!" begin
@testset "mpi_nranks() = 2" begin
mock((Trixi.mpi_nranks) => () -> 2) do _
mesh = TreeMesh{2, Trixi.ParallelTree{2}}(30, (0.0, 0.0), 1)
# Refine twice
Trixi.refine!(mesh.tree)
Trixi.refine!(mesh.tree)

# allow_coarsening = true
Trixi.partition!(mesh)
# Use parent for OffsetArray
@test parent(mesh.n_cells_by_rank) == [11, 10]
@test mesh.tree.mpi_ranks[1:21] ==
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
@test parent(mesh.first_cell_by_rank) == [1, 12]

# allow_coarsening = false
Trixi.partition!(mesh; allow_coarsening=false)
@test parent(mesh.n_cells_by_rank) == [11, 10]
@test mesh.tree.mpi_ranks[1:21] ==
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
@test parent(mesh.first_cell_by_rank) == [1, 12]
end
end

@testset "mpi_nranks() = 3" begin
mock((Trixi.mpi_nranks) => () -> 3) do _
mesh = TreeMesh{2, Trixi.ParallelTree{2}}(100, (0.0, 0.0), 1)
# Refine twice
Trixi.refine!(mesh.tree)
Trixi.refine!(mesh.tree)

# allow_coarsening = true
Trixi.partition!(mesh)
# Use parent for OffsetArray
@test parent(mesh.n_cells_by_rank) == [11, 5, 5]
@test mesh.tree.mpi_ranks[1:21] ==
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
@test parent(mesh.first_cell_by_rank) == [1, 12, 17]

# allow_coarsening = false
Trixi.partition!(mesh; allow_coarsening=false)
@test parent(mesh.n_cells_by_rank) == [9, 6, 6]
@test mesh.tree.mpi_ranks[1:21] ==
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2]
@test parent(mesh.first_cell_by_rank) == [1, 10, 16]
end
end

@testset "mpi_nranks() = 9" begin
mock((Trixi.mpi_nranks) => () -> 9) do _
mesh = TreeMesh{2, Trixi.ParallelTree{2}}(1000, (0.0, 0.0), 1)
# Refine twice
Trixi.refine!(mesh.tree)
Trixi.refine!(mesh.tree)
Trixi.refine!(mesh.tree)
Trixi.refine!(mesh.tree)

# allow_coarsening = true
Trixi.partition!(mesh)
# Use parent for OffsetArray
@test parent(mesh.n_cells_by_rank) == [44, 37, 38, 37, 37, 37, 38, 37, 36]
@test parent(mesh.first_cell_by_rank) == [1, 45, 82, 120, 157, 194, 231, 269, 306]
end
end

@testset "mpi_nranks() = 3 non-uniform" begin
mock((Trixi.mpi_nranks) => () -> 3) do _
mesh = TreeMesh{2, Trixi.ParallelTree{2}}(100, (0.0, 0.0), 1)
# Refine whole tree
Trixi.refine!(mesh.tree)
# Refine left leaf
Trixi.refine!(mesh.tree, [2])

# allow_coarsening = true
Trixi.partition!(mesh)
# Use parent for OffsetArray
@test parent(mesh.n_cells_by_rank) == [6, 1, 2]
@test mesh.tree.mpi_ranks[1:9] == [0, 0, 0, 0, 0, 0, 1, 2, 2]
@test parent(mesh.first_cell_by_rank) == [1, 7, 8]

# allow_coarsening = false
Trixi.partition!(mesh; allow_coarsening=false)
@test parent(mesh.n_cells_by_rank) == [5, 2, 2]
@test mesh.tree.mpi_ranks[1:9] == [0, 0, 0, 0, 0, 1, 1, 2, 2]
@test parent(mesh.first_cell_by_rank) == [1, 6, 8]
end
end

@testset "not enough ranks" begin
mock((Trixi.mpi_nranks) => () -> 3) do _
mesh = TreeMesh{2, Trixi.ParallelTree{2}}(100, (0.0, 0.0), 1)

# Only one leaf
@test_throws AssertionError(
"Too many ranks to properly partition the mesh!") Trixi.partition!(mesh)

# Refine to 4 leaves
Trixi.refine!(mesh.tree)

# All four leaves will need to be on one rank to allow coarsening
@test_throws AssertionError(
"Too many ranks to properly partition the mesh!") Trixi.partition!(mesh)
@test_nowarn Trixi.partition!(mesh; allow_coarsening=false)
end
end
end
end

@testset "interpolation" begin
@testset "nodes and weights" begin
@test Trixi.gauss_nodes_weights(1) == ([0.0], [2.0])