Skip to content

Commit

Permalink
Convert float32 -> float64 (default) (#24)
Browse files Browse the repository at this point in the history
  • Loading branch information
luisaforozco authored Mar 26, 2024
1 parent b998b71 commit 7361a7f
Show file tree
Hide file tree
Showing 7 changed files with 26 additions and 17 deletions.
9 changes: 9 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,15 @@

[![Build Status](https://github.com/DEEPDIP-project/CoupledNODE.jl/actions/workflows/CI.yml/badge.svg)](https://github.com/DEEPDIP-project/CoupledNODE.jl/actions/workflows/CI.yml)

## Installation


```julia
]
activate .
instantiate
```

### Context

This repo was created during [this co-working session](https://github.com/DEEPDIP-project/logs/blob/main/meetings/2024-02-20%20Coworking%20session.md).
Expand Down
4 changes: 2 additions & 2 deletions examples/01.00-Logistic.jl
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ f_NODE = create_f_NODE(NN, f_u; is_closed = true);
θ, st = Lux.setup(rng, f_NODE);

# * We define the NODE
trange = (0.0f0, 6.0f0)
trange = (0.0, 6.0)
u0 = [0.01]
full_NODE = NeuralODE(f_NODE, trange, Tsit5(), adaptive = false, dt = 0.001, saveat = 0.2);

Expand All @@ -73,7 +73,7 @@ myloss = create_randloss_MulDtO(u_experiment, nunroll = nunroll, nintervals = ni

# Second, we define this auxiliary NODE that will be used for training
dt = 0.01 # it has to be as fine as the data
t_train_range = (0.0f0, dt * (nunroll + 1)) # it has to be as long as unroll
t_train_range = (0.0, dt * (nunroll + 1)) # it has to be as long as unroll
training_NODE = NeuralODE(f_NODE,
t_train_range,
Tsit5(),
Expand Down
4 changes: 2 additions & 2 deletions examples/02.00-GrayScott.jl
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ f_CNODE = create_f_CNODE(F_u, G_v, grid; is_closed = false);
length(θ) == 0;

# We now do a short *burnout run* to get rid of the initial artifacts. This allows us to discard the transient dynamics and to have a good initial condition for the data collection run.
trange_burn = (0.0f0, 10.0f0);
trange_burn = (0.0, 10.0);
dt, saveat = (1e-2, 1);
full_CNODE = NeuralODE(f_CNODE,
trange_burn,
Expand All @@ -79,7 +79,7 @@ burnout_CNODE_solution = Array(full_CNODE(uv0, θ, st)[1]);

# We use the output of the *burnout run* to start a longer simulation
uv0 = burnout_CNODE_solution[:, :, end];
trange = (0.0f0, 8000.0f0);
trange = (0.0, 8000.0);
# the maximum suggested time step for GS is defined as `1/(4 * Dmax)`
dt, saveat = (1 / (4 * max(D_u, D_v)), 25);
full_CNODE = NeuralODE(f_CNODE, trange, Tsit5(), adaptive = false, dt = dt, saveat = saveat);
Expand Down
8 changes: 4 additions & 4 deletions examples/02.01-GrayScott.jl
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ f_CNODE = create_f_CNODE(F_u, G_v, grid; is_closed = false);

# **Burnout run:** to discard the results of the initial conditions.
# In this case we need 2 burnouts: first one with a relatively large time step and then another one with a smaller time step. This allows us to discard the transient dynamics and to have a good initial condition for the data collection run.
trange_burn = (0.0f0, 1.0f0)
trange_burn = (0.0, 1.0)
dt, saveat = (1e-2, 1)
burnout_CNODE = NeuralODE(f_CNODE,
trange_burn,
Expand All @@ -83,7 +83,7 @@ burnout_CNODE = NeuralODE(f_CNODE,
saveat = saveat);
burnout_CNODE_solution = Array(burnout_CNODE(uv0, θ_0, st_0)[1]);
# Second burnout with a smaller timestep
trange_burn = (0.0f0, 500.0f0)
trange_burn = (0.0, 500.0)
dt, saveat = (1 / (4 * max(D_u, D_v)), 100)
burnout_CNODE = NeuralODE(f_CNODE,
trange_burn,
Expand All @@ -95,7 +95,7 @@ burnout_CNODE_solution = Array(burnout_CNODE(burnout_CNODE_solution[:, :, end],

# Data collection run
uv0 = burnout_CNODE_solution[:, :, end];
trange = (0.0f0, 2000.0f0);
trange = (0.0, 2000.0);
dt, saveat = (1 / (4 * max(D_u, D_v)), 1);
GS_CNODE = NeuralODE(f_CNODE, trange, Tsit5(), adaptive = false, dt = dt, saveat = saveat);
GS_sim = Array(GS_CNODE(uv0, θ_0, st_0)[1]);
Expand Down Expand Up @@ -243,7 +243,7 @@ display(p)
# The learned weights look perfect, but let's check what happens if we use them to solve the GS model.

# Let's solve the system, for two different set of parameters, with the trained CNODE and compare with the exact solution
trange = (0.0f0, 500);
trange = (0.0, 500);
dt, saveat = (1, 5);

## Exact solution
Expand Down
12 changes: 6 additions & 6 deletions examples/02.02-GrayScott.jl
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ f_CNODE = create_f_CNODE(F_u, G_v, grid; is_closed = false);
θ_0, st_0 = Lux.setup(rng, f_CNODE);

# **Burnout run**
trange_burn = (0.0f0, 1.0f0)
trange_burn = (0.0, 1.0)
dt, saveat = (1e-2, 1)
burnout_CNODE = NeuralODE(f_CNODE,
trange_burn,
Expand All @@ -78,7 +78,7 @@ burnout_CNODE = NeuralODE(f_CNODE,
saveat = saveat);
burnout_CNODE_solution = Array(burnout_CNODE(uv0, θ_0, st_0)[1]);
# Second burnout with a larger timestep
trange_burn = (0.0f0, 800.0f0)
trange_burn = (0.0, 800.0)
dt, saveat = (1 / (4 * max(D_u, D_v)), 100)
burnout_CNODE = NeuralODE(f_CNODE,
trange_burn,
Expand All @@ -95,7 +95,7 @@ uv0 = burnout_CNODE_solution[:, :, end];
# 2. prevent instabilities while training
# However, this means that the simulation can not be long.
dt, saveat = (1 / (4 * max(D_u, D_v)), 0.001)
trange = (0.0f0, 50.0f0)
trange = (0.0, 50.0)
GS_CNODE = NeuralODE(f_CNODE, trange, Tsit5(), adaptive = false, dt = dt, saveat = saveat);
GS_sim = Array(GS_CNODE(uv0, θ_0, st_0)[1])

Expand Down Expand Up @@ -167,15 +167,15 @@ nsamples = 1;
# Also, it is important to solve for only the time interval that is needed at each training step (corresponding to `nunroll` steps)
dt_train = 0.001;
saveat_train = saveat
t_train_range = (0.0f0, saveat_train * nunroll)
t_train_range = (0.0, saveat_train * nunroll)
training_CNODE = NeuralODE(f_closed_CNODE,
t_train_range,
Tsit5(),
adaptive = false,
dt = dt_train,
saveat = saveat_train);
# Let's also define a secondary auxiliary CNODE that will be used in case the previous one is unstable
t_train_range_2 = (0.0f0, saveat_train * 2)
t_train_range_2 = (0.0, saveat_train * 2)
t_train_range_2 = t_train_range
training_CNODE_2 = NeuralODE(f_closed_CNODE,
t_train_range_2,
Expand Down Expand Up @@ -245,7 +245,7 @@ display(p)

# ### Comparison: CNODE vs exact solutions

trange = (0.0f0, 600)
trange = (0.0, 600)
dt, saveat = (1, 5)

# Exact solution
Expand Down
4 changes: 2 additions & 2 deletions examples/02.03-GrayScott.jl
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ f_CNODE = create_f_CNODE(F_u, G_v, grid; is_closed = false)
θ, st = Lux.setup(rng, f_CNODE);

# We now do a short *burnout run* to get rid of the initial artifacts
trange_burn = (0.0f0, 10.0f0)
trange_burn = (0.0, 10.0)
dt_burn, saveat_burn = (1e-2, 1)
full_CNODE = NeuralODE(f_CNODE,
trange_burn,
Expand All @@ -82,7 +82,7 @@ burnout_CNODE_solution = Array(full_CNODE(uv0, θ, st)[1])
# **CNODE run**
# We use the output of the burnout to start a longer simulation
uv0 = burnout_CNODE_solution[:, :, end];
trange = (0.0f0, 7000.0f0)
trange = (0.0, 7000.0)
dt, saveat = (0.5, 20)
full_CNODE = NeuralODE(f_CNODE, trange, Tsit5(), adaptive = false, dt = dt, saveat = saveat)
untrained_CNODE_solution = Array(full_CNODE(uv0, θ, st)[1])
Expand Down
2 changes: 1 addition & 1 deletion examples/coupling_functions/functions_example.jl
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
function observation()
f_ND = create_NODE_obs()
trange = (0.0f0, 6.0f0)
trange = (0.0, 6.0)
p0 = [0.01]
# define the observation from a NeuralODE
obs_node = NeuralODE(f_ND, trange, Tsit5(), adaptive = false, dt = 0.01, saveat = 0.01)
Expand Down

0 comments on commit 7361a7f

Please sign in to comment.