diff --git a/lib/NeuralClosure/src/closure.jl b/lib/NeuralClosure/src/closure.jl
index 07a15e8b2..624ed6b74 100644
--- a/lib/NeuralClosure/src/closure.jl
+++ b/lib/NeuralClosure/src/closure.jl
@@ -32,20 +32,6 @@ function create_closure(layers...; rng)
     closure, θ
 end
 
-"""
-Create tensor basis closure.
-"""
-function create_tensorclosure(layers...; setup, rng)
-    D = setup.grid.dimension()
-    cnn, θ = create_closure(layers...; rng)
-    function closure(u, θ)
-        B, V = tensorbasis(u, setup)
-        V = stack(V)
-        α = cnn(V, θ)
-        τ = sum(k -> α[ntuple(Returns(:), D)..., k] .* B[k], 1:length(B))
-    end
-end
-
 """
 Interpolate velocity components to volume centers.
 """
diff --git a/lib/NeuralClosure/src/training.jl b/lib/NeuralClosure/src/training.jl
index 7bff8af7b..197c30119 100644
--- a/lib/NeuralClosure/src/training.jl
+++ b/lib/NeuralClosure/src/training.jl
@@ -113,8 +113,7 @@ create_relerr_prior(f, x, y) = θ -> norm(f(x, θ) - y) / norm(y)
 """
 Create a-posteriori loss function.
 """
-function create_loss_post(; setup, method, psolver, closure, nsubstep = 1)
-    closure_model = wrappedclosure(closure, setup)
+function create_loss_post(; setup, method, psolver, closure_model, nsubstep = 1)
     setup = (; setup..., closure_model)
     (; Iu) = setup.grid
     inside = Iu[1]
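The two hunks above shift responsibility for `wrappedclosure` from `create_loss_post` to its callers, so that models which are already in `(u, θ)` form (like the tensor closures introduced further down in this diff) can be passed in unwrapped. A minimal sketch of the two calling conventions, using only names that appear in this diff:

```julia
# CNN case: wrap the raw network once at the call site (as in examplerun.jl below).
closure_model = wrappedclosure(m.closure, setup)
loss = create_loss_post(; setup, method, psolver, closure_model, nsubstep = 1)

# Tensor case: the curried tensorclosure already has the right signature.
loss = create_loss_post(;
    setup, method, psolver,
    closure_model = tensorclosure(tensorcoeffs, setup),
    nsubstep = 1,
)
```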
diff --git a/lib/NeuralClosure/test/examplerun.jl b/lib/NeuralClosure/test/examplerun.jl
index 82bc38f64..28b40e6e5 100644
--- a/lib/NeuralClosure/test/examplerun.jl
+++ b/lib/NeuralClosure/test/examplerun.jl
@@ -112,7 +112,7 @@
         setup,
         psolver,
         method = RKMethods.RK44(),
-        m.closure,
+        closure_model = wrappedclosure(m.closure, setup),
         nsubstep = 1,
     )
     dataloader = create_dataloader_post(
diff --git a/lib/PaperDC/src/train.jl b/lib/PaperDC/src/train.jl
index 3b5b8368a..9d46da222 100644
--- a/lib/PaperDC/src/train.jl
+++ b/lib/PaperDC/src/train.jl
@@ -219,7 +219,7 @@ function trainpost(;
             setup,
             psolver,
             method = RKProject(params.method, projectorder),
-            closure,
+            closure_model = wrappedclosure(closure, setup),
             nsubstep, # Time steps per loss evaluation
         )
         data_train =
diff --git a/lib/SymmetryClosure/Project.toml b/lib/SymmetryClosure/Project.toml
index d17e3da69..2ebb071c4 100644
--- a/lib/SymmetryClosure/Project.toml
+++ b/lib/SymmetryClosure/Project.toml
@@ -4,11 +4,13 @@ authors = ["Syver Døving Agdestein "]
 version = "1.0.0"
 
 [deps]
+Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
 ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
 ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
+Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
 FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
 IncompressibleNavierStokes = "5e318141-6589-402b-868d-77d7df8c442e"
 JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
@@ -20,6 +22,7 @@ NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
 NeuralClosure = "099dac27-d7f2-4047-93d5-0baee36b9c25"
 Observables = "510215fc-4207-5dde-b226-833fc4488ee2"
 Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2"
+ParameterSchedulers = "d7d3b36b-41b8-4d0d-a2bf-768c6151755e"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
@@ -30,11 +33,13 @@
 [sources]
 IncompressibleNavierStokes = {path = "../.."}
 NeuralClosure = {path = "../NeuralClosure"}
 
 [compat]
+Accessors = "0.1"
 Adapt = "4"
 CUDA = "5"
 CairoMakie = "0.12"
-ChainRulesCore = "1.25.0"
+ChainRulesCore = "1"
 ComponentArrays = "0.15"
+Dates = "1"
 FFTW = "1"
 IncompressibleNavierStokes = "2"
 JLD2 = "0.5"
@@ -46,6 +51,7 @@ NNlib = "0.9"
 NeuralClosure = "1"
 Observables = "0.5"
 Optimisers = "0.3, 0.4"
-StaticArrays = "1.9.8"
+ParameterSchedulers = "0.4.3"
+StaticArrays = "1"
 Zygote = "0.6"
 julia = "1.9"
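The new dependencies line up with the new training code: Accessors supplies `@reset` (used when resuming from a checkpoint in `trainpost` below), ParameterSchedulers supplies `CosAnneal`, and Dates backs the `time_info` logging helper. A small sketch of the `@reset` idiom on a hypothetical named tuple (the field names here are made up for illustration):

```julia
using Accessors

state = (; θmin = zeros(Float32, 3), loss = Inf32)  # hypothetical callback state
@reset state.θmin = ones(Float32, 3)  # rebinds `state` to a copy with θmin replaced
```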
diff --git a/lib/SymmetryClosure/generate_data.jl b/lib/SymmetryClosure/generate_data.jl
index ac2c73df0..0d247df33 100644
--- a/lib/SymmetryClosure/generate_data.jl
+++ b/lib/SymmetryClosure/generate_data.jl
@@ -1,4 +1,4 @@
-# LSP hack #src
+# LSP hack (to get "go to definition" etc. working) #src
 if false #src
     include("src/SymmetryClosure.jl") #src
     include("../NeuralClosure/src/NeuralClosure.jl") #src
@@ -18,72 +18,30 @@ end #src
 
 ########################################################################## #src
 
-@info "Loading packages"
-flush(stdout)
+# ## Setup
 
-using Adapt
-using CUDA
-using Dates
-using JLD2
-using NeuralClosure
-using Random
 using SymmetryClosure
 
-########################################################################## #src
-
-# SLURM specific variables
-jobid = haskey(ENV, "SLURM_JOB_ID") ? parse(Int, ENV["SLURM_JOB_ID"]) : nothing
-taskid =
-    haskey(ENV, "SLURM_ARRAY_TASK_ID") ? parse(Int, ENV["SLURM_ARRAY_TASK_ID"]) : nothing
+# Get SLURM specific variables (if any)
+(; jobid, taskid) = slurm_vars()
 
-isnothing(jobid) || @info "Running on SLURM (jobid = $jobid)"
-isnothing(taskid) || @info "Task id = $taskid)"
+# Log info about current run
+time_info()
 
-########################################################################## #src
+# Hardware selection
+(; backend, device, clean) = hardware()
 
-@info "Starting at $(Dates.now())"
-@info """
-Last commit:
+# Test case
+(; params, outdir, plotdir, seed_dns, ntrajectory) = testcase(backend)
 
-$(cd(() -> read(`git log -n 1`, String), @__DIR__))
-"""
+# DNS seeds
+dns_seeds = splitseed(seed_dns, ntrajectory)
 
 ########################################################################## #src
 
-# ## Hardware selection
-
-if CUDA.functional()
-    ## For running on a CUDA compatible GPU
-    @info "Running on CUDA"
-    backend = CUDABackend()
-    CUDA.allowscalar(false)
-    device = x -> adapt(CuArray, x)
-    clean() = (GC.gc(); CUDA.reclaim())
-else
-    ## For running on CPU.
-    ## Consider reducing the sizes of DNS, LES, and CNN layers if
-    ## you want to test run on a laptop.
-    @warn "Running on CPU"
-    backend = CPU()
-    device = identity
-    clean() = nothing
-end
-
-########################################################################## #src
+# ## Create data
 
-# ## Data generation
-#
-# Create filtered DNS data for training, validation, and testing.
-
-# Parameters
-case = SymmetryClosure.testcase()
-
-# DNS seeds
-ntrajectory = 8
-seeds = splitseed(case.seed_dns, ntrajectory)
-
-# Create data
-for (iseed, seed) in enumerate(seeds)
+for (iseed, seed) in enumerate(dns_seeds)
     if isnothing(taskid) || iseed == taskid
         @info "Creating DNS trajectory for seed $(repr(seed))"
     else
@@ -91,13 +49,13 @@ for (iseed, seed) in enumerate(seeds)
         @info "Skipping seed $(repr(seed)) for task $taskid"
         continue
     end
-    filenames = map(Iterators.product(params.nles, params.filters)) do nles, Φ
+    filenames = map(Iterators.product(params.nles, params.filters)) do (nles, Φ)
         f = getdatafile(outdir, nles, Φ, seed)
         datadir = dirname(f)
         ispath(datadir) || mkpath(datadir)
         f
     end
-    data = create_les_data(; case.params..., rng = Xoshiro(seed), filenames)
+    data = create_les_data(; params..., backend, rng = Xoshiro(seed), filenames)
     @info(
         "Trajectory info:",
         data[1].comptime / 60,
@@ -106,8 +64,11 @@ for (iseed, seed) in enumerate(seeds)
     )
 end
 
+########################################################################## #src
+
 # Computational time
-docomp = false
+
+docomp = true
 docomp && let
     comptime, datasize = 0.0, 0.0
     for seed in dns_seeds
diff --git a/lib/SymmetryClosure/src/SymmetryClosure.jl b/lib/SymmetryClosure/src/SymmetryClosure.jl
index 1bbc9eb24..adf10ef89 100644
--- a/lib/SymmetryClosure/src/SymmetryClosure.jl
+++ b/lib/SymmetryClosure/src/SymmetryClosure.jl
@@ -1,14 +1,36 @@
 module SymmetryClosure
 
-using StaticArrays
+using Accessors
+using Adapt
 using ChainRulesCore
+using CUDA
+using Dates
+using IncompressibleNavierStokes
+using JLD2
 using KernelAbstractions
 using LinearAlgebra
-using IncompressibleNavierStokes
+using Lux
 using NeuralClosure
+using NNlib
+using Optimisers
+using Random
+using StaticArrays
 
 include("tensorclosure.jl")
+include("setup.jl")
+include("cases.jl")
+include("train.jl")
 
-export tensorclosure, polynomial
+export tensorclosure, polynomial, create_cnn
+export slurm_vars,
+    time_info,
+    hardware,
+    splatfileparts,
+    getdatafile,
+    namedtupleload,
+    splitseed,
+    getsetup,
+    testcase
+export trainpost
 
 end
diff --git a/lib/SymmetryClosure/src/cases.jl b/lib/SymmetryClosure/src/cases.jl
index f2f31a058..581eb3ef4 100644
--- a/lib/SymmetryClosure/src/cases.jl
+++ b/lib/SymmetryClosure/src/cases.jl
@@ -1,25 +1,27 @@
-function testcase()
-    # Choose where to put output
+function testcase(backend)
     basedir = haskey(ENV, "DEEPDIP") ? ENV["DEEPDIP"] : joinpath(@__DIR__, "..")
-    outdir = mkpath(joinpath(basedir, "output", "kolmogorov"))
-
+    outdir = mkpath(joinpath(basedir, "output", "Kolmogorov2D"))
+    plotdir = mkpath(joinpath(outdir, "plots"))
     seed_dns = 123
+    ntrajectory = 8
     T = Float32
-
     params = (;
         D = 2,
         lims = (T(0), T(1)),
         Re = T(6e3),
         tburn = T(0.5),
-        tsim = T(5),
+        tsim = T(2),
         savefreq = 50,
-        ndns = 4096,
+        ndns = 1024,
         nles = [32, 64, 128],
         filters = [FaceAverage()],
+        backend,
         icfunc = (setup, psolver, rng) ->
            random_field(setup, T(0); kp = 20, psolver, rng),
-        method = RKMethods.LMWray3(; T),
+        method = LMWray3(; T),
         bodyforce = (dim, x, y, t) -> (dim == 1) * 5 * sinpi(8 * y),
         issteadybodyforce = true,
+        processors = (; log = timelogger(; nupdate = 100)),
     )
+    (; outdir, plotdir, seed_dns, ntrajectory, params)
 end
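With `testcase(backend)` now returning directories, seeds, and parameters as one named tuple, every script shares the same short preamble; `generate_data.jl` above reduces to exactly this pattern:

```julia
(; jobid, taskid) = slurm_vars()          # SLURM ids, or `nothing` outside SLURM
time_info()                               # log start time and last commit
(; backend, device, clean) = hardware()   # CUDABackend(), or CPU() fallback
(; params, outdir, plotdir, seed_dns, ntrajectory) = testcase(backend)
dns_seeds = splitseed(seed_dns, ntrajectory)
```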
diff --git a/lib/SymmetryClosure/src/setup.jl b/lib/SymmetryClosure/src/setup.jl
new file mode 100644
index 000000000..b8a15dbe7
--- /dev/null
+++ b/lib/SymmetryClosure/src/setup.jl
@@ -0,0 +1,61 @@
+# Some script utils
+
+function slurm_vars()
+    jobid = haskey(ENV, "SLURM_JOB_ID") ? parse(Int, ENV["SLURM_JOB_ID"]) : nothing
+    taskid =
+        haskey(ENV, "SLURM_ARRAY_TASK_ID") ? parse(Int, ENV["SLURM_ARRAY_TASK_ID"]) :
+        nothing
+    isnothing(jobid) || @info "Running on SLURM" jobid taskid
+    (; jobid, taskid)
+end
+
+function time_info()
+    @info "Starting at $(Dates.now())"
+    @info """
+    Last commit:
+
+    $(cd(() -> read(`git log -n 1`, String), @__DIR__))
+    """
+end
+
+hardware() =
+    if CUDA.functional()
+        @info "Running on CUDA"
+        CUDA.allowscalar(false)
+        backend = CUDABackend()
+        device = x -> adapt(backend, x)
+        clean = () -> (GC.gc(); CUDA.reclaim())
+        (; backend, device, clean)
+    else
+        @warn """
+        Running on CPU.
+        Consider reducing the size of DNS, LES, and CNN layers if
+        you want to test run on a laptop.
+        """
+        (; backend = CPU(), device = identity, clean = () -> nothing)
+    end
+
+function splatfileparts(args...; kwargs...)
+    sargs = string.(args)
+    skwargs = map((k, v) -> string(k) * "=" * string(v), keys(kwargs), values(kwargs))
+    s = [sargs..., skwargs...]
+    join(s, "_")
+end
+
+getdatafile(outdir, nles, filter, seed) =
+    joinpath(outdir, "data", splatfileparts(; seed = repr(seed), filter, nles) * ".jld2")
+
+function namedtupleload(file)
+    dict = load(file)
+    k, v = keys(dict), values(dict)
+    pairs = @. Symbol(k) => v
+    (; pairs...)
+end
+
+getsetup(; params, nles) = Setup(;
+    x = ntuple(α -> range(params.lims..., nles + 1), params.D),
+    params.Re,
+    params.backend,
+    params.bodyforce,
+    params.issteadybodyforce,
+)
diff --git a/lib/SymmetryClosure/src/tensorclosure.jl b/lib/SymmetryClosure/src/tensorclosure.jl
index 23c8bc1e5..13ab48b8f 100644
--- a/lib/SymmetryClosure/src/tensorclosure.jl
+++ b/lib/SymmetryClosure/src/tensorclosure.jl
@@ -1,21 +1,23 @@
-function tensorclosure(model, u, θ, setup)
+function tensorclosure(u, θ, model, setup)
     (; Δ) = setup.grid
     @assert all(Δi -> all(≈(Δi[1]), Δi), Array.(Δ))
     Δx = Array(Δ[1])[1]
     B, V = tensorbasis(u, setup)
     x = model(V, θ)
-    x .*= Δx^2
+    x = x .* (Δx^2)
     # τ = sum(x .* B; dims = ndims(B))
     τ = IncompressibleNavierStokes.lastdimcontract(x, B, setup)
     τ = apply_bc_p(τ, zero(eltype(u)), setup)
     IncompressibleNavierStokes.divoftensor(τ, setup)
 end
 
+tensorclosure(model, setup) = (u, θ) -> tensorclosure(u, θ, model, setup)
+
 function polynomial(V, θ)
     s..., nV = size(V)
     V = eachslice(V; dims = ndims(V))
     basis = if nV == 2
-        cat(V[1], V[2], V[1] .* V[2], V[1] .^ 2, V[2] .^ 2; dims = length(s) + 1)
+        cat(V[1], V[2], V[1] .* V[2], V[1] .* V[1], V[2] .* V[2]; dims = length(s) + 1)
     elseif nV == 5
         cat(
             V[1],
@@ -23,21 +25,21 @@ function polynomial(V, θ)
             V[3],
             V[4],
             V[5],
+            V[1] .* V[1],
             V[1] .* V[2],
             V[1] .* V[3],
             V[1] .* V[4],
             V[1] .* V[5],
+            V[2] .* V[2],
             V[2] .* V[3],
             V[2] .* V[4],
             V[2] .* V[5],
+            V[3] .* V[3],
             V[3] .* V[4],
             V[3] .* V[5],
+            V[4] .* V[4],
             V[4] .* V[5],
-            V[1] .^ 2,
-            V[2] .^ 2,
-            V[3] .^ 2,
-            V[4] .^ 2,
-            V[5] .^ 2;
+            V[5] .* V[5];
             dims = length(s) + 1,
         )
     end
@@ -45,3 +47,64 @@ function polynomial(V, θ)
     x = sum(θ .* basis; dims = ndims(basis))
     reshape(x, s..., size(x, ndims(x)))
 end
+
+function create_cnn(; setup, radii, channels, activations, use_bias, rng)
+    r, c, σ, b = radii, channels, activations, use_bias
+    (; grid) = setup
+    (; dimension, x) = grid
+    D = dimension()
+    T = eltype(x[1])
+
+    # dx = map(d -> d[2:end-1], Δu)
+
+    # Weight initializer
+    glorot_uniform_T(rng::AbstractRNG, dims...) = glorot_uniform(rng, T, dims...)
+
+    # Correct output channels
+    if D == 2
+        @assert c[end] == 3 # 3 basis tensors
+    elseif D == 3
+        @assert c[end] == 18 # 18 basis tensors
+    end
+
+    cin = if D == 2
+        2 # 2 invariants
+    elseif D == 3
+        5 # 5 invariants
+    end
+
+    # Add input channel size
+    c = [cin; c]
+
+    # Add padding so that output has same shape as input
+    # padder = ntuple(α -> (u -> pad_circular(u, sum(r); dims = α)), D)
+    padder = u -> pad_circular(u, sum(r))
+
+    # Some convolutional layers
+    convs = map(
+        i -> Conv(
+            ntuple(α -> 2r[i] + 1, D),
+            c[i] => c[i+1],
+            σ[i];
+            use_bias = b[i],
+            init_weight = glorot_uniform_T,
+        ),
+        eachindex(r),
+    )
+
+    # Create convolutional closure model
+    m, θ_start = NeuralClosure.create_closure(padder, convs...; rng)
+
+    inside = setup.grid.Ip
+
+    function cnn_coeffs(V, θ)
+        s..., nchan = size(V)
+        V = V[inside, :] # Remove boundaries
+        V = reshape(V, size(V)..., 1) # Add sample dim
+        coeffs = m(V, θ)
+        coeffs = pad_circular(coeffs, 1) # Add boundaries
+        coeffs = reshape(coeffs, s..., :) # Remove sample dim
+    end
+
+    cnn_coeffs, θ_start
+end
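Two details in this file are easy to miss: the in-place `x .*= Δx^2` becomes the non-mutating `x = x .* (Δx^2)`, presumably so the closure stays differentiable (Zygote does not support array mutation), and the new one-argument method `tensorclosure(model, setup)` curries to the `(u, θ)` signature that `create_loss_post` now expects. A sketch of how the pieces compose, with argument values taken from `train_models.jl` further down:

```julia
tensorcoeffs, θ_start = create_cnn(;
    setup,
    radii = [2, 2, 2],
    channels = [12, 12, 3],  # last entry must be 3 in 2D (3 basis tensors)
    activations = [tanh, tanh, identity],
    use_bias = [true, true, false],
    rng = Xoshiro(123),
)
closure_model = tensorclosure(tensorcoeffs, setup)  # (u, θ) -> div(τ)
```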
diff --git a/lib/SymmetryClosure/src/train.jl b/lib/SymmetryClosure/src/train.jl
new file mode 100644
index 000000000..0e2dc753e
--- /dev/null
+++ b/lib/SymmetryClosure/src/train.jl
@@ -0,0 +1,113 @@
+function trainpost(;
+    params,
+    outdir,
+    plotdir,
+    taskid,
+    postseed,
+    dns_seeds_train,
+    dns_seeds_valid,
+    nsubstep,
+    nunroll,
+    ntrajectory,
+    closure_models,
+    θ_start,
+    opt,
+    λ = nothing,
+    scheduler,
+    nunroll_valid,
+    nupdate_callback,
+    displayref,
+    displayupdates,
+    loadcheckpoint,
+    nepoch,
+    niter,
+)
+    device = x -> adapt(params.backend, x)
+    Φ = params.filters[1]
+    for (igrid, nles) in enumerate(params.nles)
+        igrid == 1 || continue
+        if isnothing(taskid) || igrid == taskid
+            @info "Training a-posteriori" nles
+        else
+            # Each task does one training
+            @info "Skipping a-posteriori training for iteration $(igrid) != $(taskid)"
+            continue
+        end
+        starttime = time()
+        closure_model = closure_models[igrid]
+        file = joinpath(outdir, "training", splatfileparts(; nles) * ".jld2")
+        ispath(dirname(file)) || mkpath(dirname(file))
+        figdir = mkpath(joinpath(outdir, "training"))
+        figfile = joinpath(figdir, splitext(basename(file))[1] * ".pdf")
+        checkfile = join(splitext(file), "_checkpoint")
+        setup = getsetup(; params, nles)
+        psolver = default_psolver(setup)
+        loss = create_loss_post(;
+            setup,
+            psolver,
+            params.method,
+            closure_model,
+            nsubstep, # Time steps per loss evaluation
+        )
+        data_train =
+            map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_train)
+        data_valid =
+            map(s -> namedtupleload(getdatafile(outdir, nles, Φ, s)), dns_seeds_valid)
+        dataloader = create_dataloader_post(
+            map(d -> (; d.u, d.t), data_train);
+            device,
+            nunroll,
+            ntrajectory,
+        )
+        θ = θ_start |> device
+        optstate = Optimisers.setup(opt, θ)
+        (; callbackstate, callback) = let
+            d = data_valid[1]
+            it = 1:nunroll_valid
+            data =
+                (; u = selectdim(d.u, ndims(d.u), it) |> collect |> device, t = d.t[it])
+            create_callback(
+                create_relerr_post(;
+                    data,
+                    setup,
+                    psolver,
+                    params.method,
+                    closure_model,
+                    nsubstep,
+                );
+                θ,
+                figfile,
+                displayref,
+                displayupdates,
+                nupdate = nupdate_callback,
+            )
+        end
+        if loadcheckpoint
+            @info "Resuming from checkpoint $checkfile"
+            (; icheck, trainstate, callbackstate) = namedtupleload(checkfile)
+            @assert eltype(callbackstate.θmin) == Float32 "gpu_device() only works with Float32"
+            trainstate = trainstate |> gpu_device()
+            @reset callbackstate.θmin = callbackstate.θmin |> gpu_device()
+        else
+            icheck = 0
+            trainstate = (; optstate, θ, rng = Xoshiro(postseed))
+            callbackstate = callback(callbackstate, trainstate) # Initial callback
+        end
+        for iepoch = icheck+1:nepoch
+            (; trainstate, callbackstate) =
+                train(; dataloader, loss, trainstate, niter, callbackstate, callback, λ)
+            if !isnothing(scheduler)
+                eta = scheduler(iepoch)
+                Optimisers.adjust!(trainstate.optstate, eta)
+            end
+            @info "Saving checkpoint to $(basename(checkfile))"
+            c = callbackstate |> cpu_device()
+            t = trainstate |> cpu_device()
+            jldsave(checkfile; icheck = iepoch, callbackstate = c, trainstate = t)
+        end
+        θ = callbackstate.θmin # Use best θ instead of last θ
+        results = (; θ = Array(θ), comptime = time() - starttime)
+        save_object(file, results)
+    end
+    @info "Finished a-posteriori training."
+end
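The epoch loop in `trainpost` combines checkpointing with a learning-rate schedule: after each epoch the scheduler is evaluated at `iepoch` and the result is pushed into the optimiser state with `Optimisers.adjust!`. A standalone sketch of that mechanism on a toy parameter array:

```julia
using Optimisers, ParameterSchedulers

θ = zeros(Float32, 4)
optstate = Optimisers.setup(Adam(1.0f-4), θ)
scheduler = CosAnneal(; l0 = 1.0f-6, l1 = 1.0f-3, period = 10)
for iepoch = 1:10
    # ... one epoch of train(; dataloader, loss, trainstate, ...) here ...
    Optimisers.adjust!(optstate, scheduler(iepoch))  # set the new learning rate
end
```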
diff --git a/lib/SymmetryClosure/test_tensor.jl b/lib/SymmetryClosure/test_tensor.jl
index edd39ea0c..9efd16347 100644
--- a/lib/SymmetryClosure/test_tensor.jl
+++ b/lib/SymmetryClosure/test_tensor.jl
@@ -3,7 +3,7 @@ if false
     using .SymmetryClosure
 end
 
-# using CairoMakie
+# using GLMakie
 using CairoMakie
 using IncompressibleNavierStokes
 using SymmetryClosure
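In the driver script below, two coefficient models share the same `(V, θ)` interface: the linear `polynomial` model, whose 2D parameter matrix is 5 × 3 (five monomials in the two invariants times three basis tensors, matching `zeros(5, 3)`), and the CNN from `create_cnn`. The second assignment overwrites the first, so only the CNN is trained as written; keeping the first line instead would train the polynomial model:

```julia
# Either coefficient model plugs into the same closure constructor:
tensorcoeffs, θ_start = polynomial, zeros(5, 3)  # 5 monomials × 3 basis tensors (2D)
closure_models = map(s -> tensorclosure(tensorcoeffs, s), setups)
```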
diff --git a/lib/SymmetryClosure/train_models.jl b/lib/SymmetryClosure/train_models.jl
new file mode 100644
index 000000000..fa843304e
--- /dev/null
+++ b/lib/SymmetryClosure/train_models.jl
@@ -0,0 +1,119 @@
+# LSP hack (to get "go to definition" etc. working) #src
+if false #src
+    include("src/SymmetryClosure.jl") #src
+    include("../NeuralClosure/src/NeuralClosure.jl") #src
+    include("../../src/IncompressibleNavierStokes.jl") #src
+    using .SymmetryClosure #src
+    using .NeuralClosure #src
+    using .IncompressibleNavierStokes #src
+end #src
+
+########################################################################## #src
+
+# # Train models
+#
+# This script is used to train models.
+
+@info "# Train models"
+
+########################################################################## #src
+
+# ## Packages
+
+@info "Loading packages"
+flush(stdout)
+
+using Accessors
+using CairoMakie
+using IncompressibleNavierStokes
+using Lux
+using LuxCUDA
+using Optimisers
+using ParameterSchedulers
+using NeuralClosure
+using Random
+using SymmetryClosure
+using Zygote
+
+########################################################################## #src
+
+# ## Setup
+
+# Get SLURM specific variables (if any)
+(; jobid, taskid) = slurm_vars()
+
+# Log info about current run
+time_info()
+
+# Hardware selection
+(; backend, device, clean) = hardware()
+
+# Test case
+(; params, outdir, plotdir, seed_dns, ntrajectory) = testcase(backend)
+
+# DNS seeds
+dns_seeds = splitseed(seed_dns, ntrajectory)
+dns_seeds_test = dns_seeds[1:1]
+dns_seeds_valid = dns_seeds[2:2]
+dns_seeds_train = dns_seeds[3:end]
+
+########################################################################## #src
+
+# ## Model definitions
+
+setups = map(nles -> getsetup(; params, nles), params.nles)
+
+tensorcoeffs, θ_start = polynomial, zeros(5, 3)
+tensorcoeffs, θ_start = create_cnn(;
+    setup = setups[1],
+    radii = [2, 2, 2],
+    channels = [12, 12, 3],
+    activations = [tanh, tanh, identity],
+    use_bias = [true, true, false],
+    rng = Xoshiro(123),
+);
+tensorcoeffs.m.chain
+closure_models = map(s -> tensorclosure(tensorcoeffs, s), setups)
+
+# Test run
+let
+    data_test = map(
+        s -> namedtupleload(getdatafile(outdir, params.nles[1], params.filters[1], s)),
+        dns_seeds_test,
+    )
+    @show size(data_test[1].u)
+    sample = data_test[1].u[:, :, :, 1] |> device
+    θ = θ_start |> device # |> randn! |> x -> x ./ 1e6
+    closure_models[1](sample, θ)
+    gradient(θ -> sum(closure_models[1](sample, θ)), θ)[1]
+end
+
+# Train
+let
+    nepoch = 10
+    T = typeof(params.Re)
+    trainpost(;
+        params,
+        outdir,
+        plotdir,
+        taskid,
+        postseed = 123,
+        dns_seeds_train,
+        dns_seeds_valid,
+        nsubstep = 5,
+        nunroll = 10,
+        ntrajectory = 5,
+        closure_models,
+        θ_start,
+        opt = Adam(T(1e-4)),
+        λ = T(5e-8),
+        scheduler = CosAnneal(; l0 = T(1e-6), l1 = T(1e-3), period = nepoch),
+        nunroll_valid = 50,
+        nupdate_callback = 10,
+        displayref = false,
+        displayupdates = true,
+        loadcheckpoint = false,
+        nepoch,
+        niter = 100,
+    )
+end
diff --git a/src/boundary_conditions.jl b/src/boundary_conditions.jl
index c4751c0b1..8d5441f19 100644
--- a/src/boundary_conditions.jl
+++ b/src/boundary_conditions.jl
@@ -328,8 +328,10 @@ function apply_bc_p_pullback!(::PeriodicBC, φbar, β, t, setup; isright, kwargs
     Jb = Ib .- eβ
     @. φbar[Jb] += φbar[Ia]
     @. φbar[Ja] += φbar[Ib]
-    @. φbar[Ia] = 0
-    @. φbar[Ib] = 0
+    # @. φbar[Ia] = 0
+    # @. φbar[Ib] = 0
+    fill!(view(φbar, Ia), zero(eltype(φbar))) # For tensor-valued fields
+    fill!(view(φbar, Ib), zero(eltype(φbar)))
     φbar
 end
 
diff --git a/src/tensorbasis.jl b/src/tensorbasis.jl
index b3ec6b00c..f6f9d1a3a 100644
--- a/src/tensorbasis.jl
+++ b/src/tensorbasis.jl
@@ -88,7 +88,6 @@ end
     Rbar = S' * Bbar[I, 3] - Bbar[I, 3] * S' + 2 * Vbar[I, 2] * R
     ∇ubar = (Sbar + Sbar') / 2 + (Rbar - Rbar') / 2
     ∇_adjoint!(∇ubar, ubar, I, Δ, Δu)
-    ubar
 end
 
 @kernel function tensorbasis_adjoint_kernel!(::Dimension{3}, ubar, Bbar, Vbar, u, Δ, Δu, I0)
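On the `boundary_conditions.jl` hunk: broadcast assignment of the literal `0` fails once `φbar` holds tensor values, since `0` cannot be converted to a static-matrix element, while `zero(eltype(φbar))` yields a zero tensor of the right type; hence the switch to `fill!` on a view. A minimal sketch of the failure mode, assuming a StaticArrays element type:

```julia
using StaticArrays

φbar = fill(zero(SMatrix{2,2,Float64,4}), 4, 4)  # tensor-valued field
Ia = CartesianIndices((1:1, 1:4))                # boundary indices
# φbar[Ia] .= 0                        # errors: 0 is not a 2×2 tensor
fill!(view(φbar, Ia), zero(eltype(φbar)))  # works for scalar and tensor fields alike
```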