Merge pull request #4 from ThummeTo/main
v0.12.0 fix examples (ThummeTo#126)
0815Creeper authored Feb 22, 2024
2 parents 6e5cf15 + 77471fb commit 4a2de81
Showing 9 changed files with 29 additions and 19 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/Example.yml
@@ -53,7 +53,7 @@ jobs:
echo "starting gif fixing"
mv examples/src/gif_*.gif examples/src/${{ matrix.file-name }}_files
$env:Path += ";C:\Program Files\Git\usr\bin"
awk '{if($0~/<img src=\"data:image\/gif;base64,[[:alpha:],[:digit:],\/,+,=]*\" \/>/) {sub(/<img src=\"data:image\/gif;base64,[[:alpha:],[:digit:],\/,+,=]*\" \/>/,\"![gif](${{ matrix.file-name }}_files\/gif_\"++i\".gif)\")}}1' examples/src/${{ matrix.file-name }}.md > examples/src/tmp_${{ matrix.file-name }}.md
awk '{if($0~/<img src="data:image\/gif;base64,[[:alpha:],[:digit:],\/,+,=]*" \/>/) {sub(/<img src="data:image\/gif;base64,[[:alpha:],[:digit:],\/,+,=]*" \/>/,"![gif](${{ matrix.file-name }}_files\/gif_"++i".gif)")}}1' examples/src/${{ matrix.file-name }}.md > examples/src/tmp_${{ matrix.file-name }}.md
mv -Force examples/src/tmp_${{ matrix.file-name }}.md examples/src/${{ matrix.file-name }}.md
echo "gifs should be fixed"
4 changes: 2 additions & 2 deletions examples/src/growing_horizon_ME.ipynb
@@ -641,10 +641,10 @@
"outputs": [],
"source": [
"# train\n",
"paramsNet = FMIFlux.params(neuralFMU)\n",
"paramsNet = Flux.params(neuralFMU)\n",
"\n",
"optim = Adam()\n",
"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1000), optim; cb=()->callb(paramsNet)) "
"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1000), optim; cb=()->callb(paramsNet)) "
]
},
{
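This is the recurring pattern of the release: the parameters are still collected (here via `Flux.params`) for use in the callback, but `FMIFlux.train!` now receives the NeuralFMU itself instead of the parameter collection. A condensed sketch of the updated cell, assuming `lossSum`, `callb` and `neuralFMU` are defined earlier in the notebook:

```julia
using FMIFlux, FMIFlux.Flux            # Flux as used in the example notebooks

paramsNet = Flux.params(neuralFMU)     # parameters, still needed by the callback

optim = Adam()
FMIFlux.train!(lossSum, neuralFMU,     # the NeuralFMU itself, no longer paramsNet
               Iterators.repeated((), 1000), optim;
               cb = () -> callb(paramsNet))
```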
5 changes: 4 additions & 1 deletion examples/src/juliacon_2023.ipynb
@@ -465,10 +465,13 @@
" gates, # compute resulting dx from ANN + FMU\n",
" dx -> cacheRetrieve(1:4, dx)) # stack together: dx[1,2,3,4] from cache + dx[5,6] from gates\n",
"\n",
" solver = Tsit5()\n",
" \n",
" # new NeuralFMU \n",
" neuralFMU = ME_NeuralFMU(f, # the FMU used in the NeuralFMU \n",
" model, # the model we specified above \n",
" (tStart, tStop), # a default start ad stop time for solving the NeuralFMU\n",
" solver;\n",
" saveat=tSave) # the time points to save the solution at\n",
" neuralFMU.modifiedState = false # speed optimization (NeuralFMU state equals FMU state)\n",
" \n",
@@ -740,7 +743,7 @@
" \n",
" # the actual training\n",
" FMIFlux.train!(loss, # the loss function for training\n",
" params, # the parameters to train\n",
" neuralFMU, # the parameters to train\n",
" Iterators.repeated((), steps), # an iterator repeating `steps` times\n",
" optim; # the optimizer to train\n",
" gradient=:ForwardDiff, # currently, only ForwarDiff leads to good results for multi-event systems\n",
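Besides the updated `train!` call, this example now constructs the ODE solver explicitly and hands it to the `ME_NeuralFMU` constructor as a positional argument. A condensed sketch, with `f`, `model`, `tStart`, `tStop` and `tSave` defined earlier in the notebook:

```julia
import FMI.DifferentialEquations: Tsit5    # explicit solver, as imported in the helpers

solver = Tsit5()

neuralFMU = ME_NeuralFMU(f,                # the FMU used in the NeuralFMU
                         model,            # the model specified above
                         (tStart, tStop),  # default start and stop time
                         solver;           # the explicit solver
                         saveat = tSave)   # time points to save the solution at
neuralFMU.modifiedState = false            # speed optimization (NeuralFMU state equals FMU state)
```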
1 change: 1 addition & 0 deletions examples/src/juliacon_2023_helpers.jl
@@ -8,6 +8,7 @@ import FMIFlux: roundToLength
import FMIZoo: movavg

import FMI: FMU2Solution
import FMI.DifferentialEquations: Tsit5
import FMIZoo: VLDM, VLDM_Data

function fmiSingleInstanceMode(fmu::FMU2, mode::Bool)
5 changes: 3 additions & 2 deletions examples/src/mdpi_2022.ipynb
@@ -108,6 +108,7 @@
"using FMIFlux.Flux # Machine Learning in Julia\n",
"\n",
"import FMI.DifferentialEquations: Tsit5 # import the Tsit5-solver\n",
"import FMI: FMU2Solution\n",
"using JLD2 # data format for saving/loading parameters\n",
"\n",
"# plotting\n",
@@ -611,14 +612,14 @@
"\n",
"# we use ForwardDiff for gradinet determination, because the FMU throws multiple events per time instant (this is not supported by reverse mode AD)\n",
"# the chunk_size controls the nuber of forward evaluations of the model (the bigger, the less evaluations)\n",
"FMIFlux.train!(loss, params, Iterators.repeated((), batchLen), optim; gradient=:ForwardDiff, chunk_size=32, cb=updateScheduler) \n",
"FMIFlux.train!(loss, neuralFMU, Iterators.repeated((), batchLen), optim; gradient=:ForwardDiff, chunk_size=32, cb=updateScheduler) \n",
"loss_after = batch_loss(params[1])"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"metadata": {},
"source": [
"The batch loss (\"AVG\" and \"MAX\") is only updated every 5 steps, as defined in the scheduler. Every 25 steps, we plot the current batch element losses. Please note, that we only did around 100 training steps, so training has not converged for now. But we are curious and want to have a look on the intermediate results. \n",
"\n",
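Put together, the updated training cell reads roughly as follows; `loss`, `batch_loss`, `params`, `neuralFMU`, `batchLen`, `optim` and `updateScheduler` come from earlier cells, and the `loss_before` line is only assumed here for the comparison:

```julia
loss_before = batch_loss(params[1])    # assumed: loss before training, for comparison

# ForwardDiff is used for the gradient because the FMU throws multiple events per time
# instant (not supported by reverse-mode AD); chunk_size controls the number of forward
# evaluations of the model (the bigger the chunks, the fewer evaluations).
FMIFlux.train!(loss, neuralFMU, Iterators.repeated((), batchLen), optim;
               gradient = :ForwardDiff, chunk_size = 32, cb = updateScheduler)

loss_after = batch_loss(params[1])     # loss after training
```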
4 changes: 2 additions & 2 deletions examples/src/modelica_conference_2021.ipynb
@@ -891,7 +891,7 @@
"outputs": [],
"source": [
"optim = Adam()\n",
"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1), optim; cb=()->callb(paramsNet)) "
"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1), optim; cb=()->callb(paramsNet)) "
]
},
{
@@ -950,7 +950,7 @@
"for run in 1:numRuns\n",
" @time for epoch in 1:numEpochs\n",
" @info \"Run: $(run)/$(numRuns) Epoch: $(epoch)/$(numEpochs)\"\n",
" FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), numIterations), optim; cb=()->callb(paramsNet))\n",
" FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), numIterations), optim; cb=()->callb(paramsNet))\n",
" end\n",
" flush(stderr)\n",
" flush(stdout)\n",
2 changes: 1 addition & 1 deletion examples/src/simple_hybrid_CS.ipynb
@@ -530,7 +530,7 @@
"paramsNet = FMIFlux.params(csNeuralFMU)\n",
"\n",
"optim = Adam()\n",
"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 250), optim; cb=()->callb(paramsNet))"
"FMIFlux.train!(lossSum, csNeuralFMU, Iterators.repeated((), 250), optim; cb=()->callb(paramsNet))"
]
},
{
4 changes: 2 additions & 2 deletions examples/src/simple_hybrid_ME.ipynb
@@ -500,7 +500,7 @@
"paramsNet = FMIFlux.params(neuralFMU)\n",
"\n",
"optim = Adam()\n",
"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 300), optim; cb=()->callb(paramsNet)) "
"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 300), optim; cb=()->callb(paramsNet)) "
]
},
{
@@ -563,7 +563,7 @@
},
"outputs": [],
"source": [
"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1200), optim; cb=()->callb(paramsNet)) \n",
"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1200), optim; cb=()->callb(paramsNet)) \n",
"# plot results mass.s\n",
"solutionAfter = neuralFMU(x₀)\n",
"Plots.plot!(fig, solutionAfter; stateIndices=1:1, values=false, label=\"NeuralFMU (1500 epochs)\", linewidth=2)\n",
21 changes: 13 additions & 8 deletions src/neural.jl
@@ -1030,6 +1030,7 @@ function getComponent(nfmu::NeuralFMU)
return hasCurrentComponent(nfmu.fmu) ? getCurrentComponent(nfmu.fmu) : nothing
end

# ToDo: Separate this: NeuralFMU creation and solving!
"""
TODO: Signature, Arguments and Keyword-Arguments descriptions.
@@ -1287,7 +1288,11 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0,
prob = ODEProblem{true}(ff, nfmu.x0, nfmu.tspan, p)

if isnothing(sensealg)
if isimplicit(solver)
if isnothing(solver)

logWarning(nfmu.fmu, "No solver keyword detected for NeuralFMU.\nContinuous adjoint method is applied, which requires solving backward in time.\nThis might be not supported by every FMU.", 1)
sensealg = InterpolatingAdjoint(; autojacvec=ReverseDiffVJP(true), checkpointing=true)
elseif isimplicit(solver)
@assert !(alg_autodiff(solver) isa AutoForwardDiff) "Implicit solver using `autodiff=true` detected for NeuralFMU.\nThis is currently not supported, please use `autodiff=false` as solver keyword.\nExample: `Rosenbrock23(autodiff=false)` instead of `Rosenbrock23()`."

logWarning(nfmu.fmu, "Implicit solver detected for NeuralFMU.\nContinuous adjoint method is applied, which requires solving backward in time.\nThis might be not supported by every FMU.", 1)
@@ -1677,23 +1682,23 @@ end

"""
train!(loss, params::Union{Flux.Params, Zygote.Params}, data, optim::Flux.Optimise.AbstractOptimiser; gradient::Symbol=:Zygote, cb=nothing, chunk_size::Integer=64, printStep::Bool=false)
train!(loss, neuralFMU::Union{ME_NeuralFMU, CS_NeuralFMU}, data, optim; gradient::Symbol=:ReverseDiff, kwargs...)
A function analogous to Flux.train! but with additional features and explicit parameters (faster).
# Arguments
- `loss` a loss function in the format `loss(p)`
- `params` a object holding the parameters
- `neuralFMU` an object holding the NeuralFMU with its parameters
- `data` the training data (or often an iterator)
- `optim` the optimizer used for training
# Keywords
- `gradient` a symbol determining the AD library used for gradient computation; available are `:ForwardDiff`, `:Zygote`, and `:ReverseDiff` (default)
- `cb` a custom callback function that is called after every training step
- `chunk_size` the chunk size for AD using ForwardDiff (ignored for other AD-methods)
- `printStep` a boolean determining whether the gradient min/max is printed after every step (for gradient debugging)
- `proceed_on_assert` a boolean that determines whether to throw an exception on error or to proceed training and just print the error
- `numThreads` [WIP]: an integer determining how many threads are used for training (how many gradients are generated in parallel)
- `cb` a custom callback function that is called after every training step (default `nothing`)
- `chunk_size` the chunk size for AD using ForwardDiff (ignored for other AD-methods) (default `:auto_fmiflux`)
- `printStep` a boolean determining whether the gradient min/max is printed after every step (for gradient debugging) (default `false`)
- `proceed_on_assert` a boolean that determines whether to throw an exception on error or to proceed training and just print the error (default `false`)
- `multiThreading`: a boolean that determines whether multiple gradients are generated in parallel (default `false`)
- `multiObjective`: set this if the loss function returns multiple values (multi-objective optimization); currently, gradients are applied to the optimizer one after another (default `false`)
"""
function train!(loss, neuralFMU::Union{ME_NeuralFMU, CS_NeuralFMU}, data, optim; gradient::Symbol=:ReverseDiff, kwargs...)
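A minimal usage sketch of the updated `train!` signature, following the calls in the example notebooks above; `lossSum`, `neuralFMU` and `callb` stand in for user-defined objects:

```julia
using FMIFlux, FMIFlux.Flux

paramsNet = Flux.params(neuralFMU)             # parameters, used here only by the callback
optim     = Adam()

FMIFlux.train!(lossSum,                        # loss in the format loss(p)
               neuralFMU,                      # the NeuralFMU holding the parameters
               Iterators.repeated((), 300),    # 300 training steps
               optim;                          # the optimizer
               gradient = :ReverseDiff,        # default AD backend, shown explicitly
               cb = () -> callb(paramsNet))    # custom callback after each step
```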
