Merge #218
218: Compatibility with new EKP, version and updated Lorenz example r=odunbar a=odunbar

<!--- THESE LINES ARE COMMENTED -->
## Purpose 
<!--- One sentence to describe the purpose of this PR, refer to any linked issues:
#14 -- this will link to issue 14
Closes #2 -- this will automatically close issue 2 on PR merge
-->

## Content
<!---  specific tasks that are currently complete 
- Solution implemented
-->
- `get_logpdf` -> `logpdf`, so that the MCMC tests pass
- Updated the EnsembleKalmanProcesses (EKP) compat in Project.toml; also added compat entries for ProgressBars and StableRNGs
- Added the option to set `"scheduler" => XYZ` in `optimizer_options` for RF emulators (a sketch follows this list)
- Fixed bugs in the observational noise of the Lorenz example
- Added scaling tuners for easily widening or narrowing the priors in RF optimization (currently one for the input space, `"prior_in_scale"`, and one for the output space, `"prior_out_scale"`)
- Added diagnostic output comparing the prior support with the optimal parameters, to aid this tuning
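
A minimal sketch of how the new `optimizer_options` keys might be combined for the scalar RF interface (not code from this PR; the scheduler type, problem size, and variable names are illustrative assumptions). The vector interface additionally accepts `"prior_out_scale"`, as shown in the example diffs below:

```julia
using CalibrateEmulateSample.Emulators
using EnsembleKalmanProcesses  # assumed provider of scheduler types such as DataMisfitController

input_dim = 2       # illustrative input dimension
n_features = 180    # number of random features

optimizer_options = Dict(
    "n_iteration" => 20,                   # "max" EKI iterations (may stop earlier)
    "scheduler" => DataMisfitController(), # new: pass an EKP timestep scheduler
    "prior_in_scale" => 0.1,               # new: widen/narrow the hyperparameter prior in input space
    "verbose" => true,
)

srfi = ScalarRandomFeatureInterface(
    n_features,
    input_dim,
    optimizer_options = optimizer_options,
)
```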

<!---
Review checklist

I have:
- followed the codebase contribution guide: https://clima.github.io/ClimateMachine.jl/latest/Contributing/
- followed the style guide: https://clima.github.io/ClimateMachine.jl/latest/DevDocs/CodeStyle/
- followed the documentation policy: https://github.com/CliMA/policies/wiki/Documentation-Policy
- checked that this PR does not duplicate an open PR.

In the Content, I have included 
- relevant unit tests, and integration tests, 
- appropriate docstrings on all functions, structs, and modules, and included relevant documentation.

-->

----
- [ ] I have read and checked the items on the review checklist.


Co-authored-by: odunbar <odunbar@caltech.edu>
bors[bot] and odunbar authored Jun 13, 2023
2 parents 1846d06 + 05c90e0 commit cf3f7d9
Showing 12 changed files with 355 additions and 131 deletions.
6 changes: 4 additions & 2 deletions Project.toml
@@ -29,14 +29,16 @@ AbstractMCMC = "3.3, 4"
AdvancedMH = "0.6, 0.7"
Conda = "1.7"
Distributions = "0.24, 0.25"
EnsembleKalmanProcesses = "1.1"
DocStringExtensions = "0.8, 0.9"
EnsembleKalmanProcesses = "0.8, 0.9, 0.10, 0.11, 0.12, 0.13, 0.14, 1.0"
GaussianProcesses = "0.12"
MCMCChains = "4.14, 5, 6"
ProgressBars = "1"
PyCall = "1.93"
RandomFeatures = "0.3"
ScikitLearn = "0.6, 0.7"
StableRNGs = "1"
StatsBase = "0.33, 0.34"
RandomFeatures = "0.3"
julia = "1.6, 1.7, 1.8"

[extras]
10 changes: 8 additions & 2 deletions examples/Emulator/RandomFeature/scalar_optimize_and_plot_RF.jl
@@ -126,8 +126,14 @@ end

# setup random features
n_features = 180

srfi = ScalarRandomFeatureInterface(n_features, p, diagonalize_input = diagonalize_input)
optimizer_options = Dict("n_iteration" => 20, "prior_in_scale" => 0.1, "verbose" => true) #"Max" iterations (may do less)

srfi = ScalarRandomFeatureInterface(
    n_features,
    p,
    diagonalize_input = diagonalize_input,
    optimizer_options = optimizer_options,
)
emulator = Emulator(srfi, iopairs, obs_noise_cov = Σ, normalize_inputs = true)
println("build RF with $n training points and $(n_features) random features.")

27 changes: 23 additions & 4 deletions examples/Emulator/RandomFeature/vector_optimize_and_plot_RF.jl
@@ -133,18 +133,37 @@ function main()

# setup random features
n_features = 200
if case ∈ ["svd-diag", "svd-nondiag"]
    optimizer_options =
        Dict("n_iteration" => 20, "prior_in_scale" => 0.01, "prior_out_scale" => 1.0, "verbose" => true) #"Max" iterations (may do less)
else # without svd
    optimizer_options =
        Dict("n_iteration" => 20, "prior_in_scale" => 0.01, "prior_out_scale" => 0.01, "verbose" => true) #"Max" iterations (may do less)
end

if case == "svd-diag"
    vrfi = VectorRandomFeatureInterface(n_features, p, d, diagonalize_output = true)
    vrfi = VectorRandomFeatureInterface(
        n_features,
        p,
        d,
        diagonalize_output = true,
        optimizer_options = optimizer_options,
    )
    emulator = Emulator(vrfi, iopairs, obs_noise_cov = Σ, normalize_inputs = true)
elseif case == "svd-nondiag"
    vrfi = VectorRandomFeatureInterface(n_features, p, d)
    vrfi = VectorRandomFeatureInterface(n_features, p, d, optimizer_options = optimizer_options)
    emulator = Emulator(vrfi, iopairs, obs_noise_cov = Σ, normalize_inputs = true)
elseif case == "nosvd-diag"
    vrfi = VectorRandomFeatureInterface(n_features, p, d, diagonalize_output = true)
    vrfi = VectorRandomFeatureInterface(
        n_features,
        p,
        d,
        diagonalize_output = true,
        optimizer_options = optimizer_options,
    )
    emulator = Emulator(vrfi, iopairs, obs_noise_cov = Σ, normalize_inputs = true, decorrelate = false)
elseif case == "nosvd-nondiag"
    vrfi = VectorRandomFeatureInterface(n_features, p, d)
    vrfi = VectorRandomFeatureInterface(n_features, p, d, optimizer_options = optimizer_options)
    emulator = Emulator(vrfi, iopairs, obs_noise_cov = Σ, normalize_inputs = true, decorrelate = false)
end
println("build RF with $n training points and $(n_features) random features.")
71 changes: 35 additions & 36 deletions examples/Lorenz/GModel_common.jl
@@ -128,56 +128,64 @@ end
# tend: End of simulation Float64(1), nstep:
function lorenz_solve(settings::LSettings, params::LParams)
# Initialize
nstep = Int32(ceil(settings.tend / settings.dt))
nstep = Int32(ceil((settings.tend - settings.t_start) / settings.dt))
xn = zeros(Float64, settings.N, nstep)
t = zeros(Float64, nstep)
# Initial perturbation
X = fill(Float64(0.0), settings.N)
X = X + settings.Fp
Xbuffer = zeros(4, settings.N)
# March forward in time
for j in 1:nstep
t[j] = settings.dt * j
t[j] = settings.t_start + settings.dt * j
#use view to update a slice
# RK4! modifies first and last arguments
if settings.dynamics == 1
X = RK4(X, settings.dt, settings.N, params.F)
RK4!(X, settings.dt, settings.N, params.F, Xbuffer)
elseif settings.dynamics == 2
F_local = params.F + params.A * sin(params.ω * t[j])
X = RK4(X, settings.dt, settings.N, F_local)
RK4!(X, settings.dt, settings.N, F_local, Xbuffer)
end
xn[:, j] = X
xn[:, j] += X
end
# Output
return xn, t

end

# Lorenz 96 system
# f = dx/dt
# f = dx/dt overwriting first argument
# Inputs: x: state, N: longitude steps, F: forcing
function f(x, N, F)
f = zeros(Float64, N)
function f!(xnew, x, N, F)

# Loop over N positions
for i in 3:(N - 1)
f[i] = -x[i - 2] * x[i - 1] + x[i - 1] * x[i + 1] - x[i] + F
xnew[i] = -x[i - 2] * x[i - 1] + x[i - 1] * x[i + 1] - x[i] + F
end
# Periodic boundary conditions
f[1] = -x[N - 1] * x[N] + x[N] * x[2] - x[1] + F
f[2] = -x[N] * x[1] + x[1] * x[3] - x[2] + F
f[N] = -x[N - 2] * x[N - 1] + x[N - 1] * x[1] - x[N] + F
xnew[1] = -x[N - 1] * x[N] + x[N] * x[2] - x[1] + F
xnew[2] = -x[N] * x[1] + x[1] * x[3] - x[2] + F
xnew[N] = -x[N - 2] * x[N - 1] + x[N - 1] * x[1] - x[N] + F
# Output
return f
return nothing
end

# RK4 solve
function RK4(xold, dt, N, F)
# Predictor steps
k1 = f(xold, N, F)
k2 = f(xold + k1 * dt / 2.0, N, F)
k3 = f(xold + k2 * dt / 2.0, N, F)
k4 = f(xold + k3 * dt, N, F)
# RK4 solve, overwriting first argument
function RK4!(xold, dt, N, F, buffer)
#buffer is 4 x N zeros
@assert size(buffer) == (4, N)

# Predictor steps updates the final argument
# must use view to update a slice in-place
f!(view(buffer, 1, :), xold, N, F) #k1
f!(view(buffer, 2, :), xold + buffer[1, :] * dt / 2.0, N, F) #k2
f!(view(buffer, 3, :), xold + buffer[2, :] * dt / 2.0, N, F) #k3
f!(view(buffer, 4, :), xold + buffer[3, :] * dt, N, F) #k4
# Step
xnew = xold + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
# Output
return xnew
#xnew = xold + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
xold .+= (dt / 6.0) * (buffer[1, :] + 2.0 * buffer[2, :] + 2.0 * buffer[3, :] + buffer[4, :])

return nothing
end

function regression(X, y)
@@ -216,7 +224,8 @@ end
###############################
function stats(settings, xn, t)
# Define averaging indices range
indices = findall(x -> (x > settings.t_start) && (x < settings.t_start + settings.T), t)
indices = findall(x -> (x > settings.t_start) && (x < settings.tend), t)

# Define statistics of interest
if settings.stats_type == 1 # Mean
# Average in time and over longitude
Expand Down Expand Up @@ -317,25 +326,15 @@ function stats(settings, xn, t)
var_slide = zeros(N)
t_slide = zeros(nt, N)
for i in 1:N
var_slide[i] = var(vcat(xn[:, indices[((i - 1) * nt + 1):(i * nt)]]...))
var_slide[i] = var(xn[:, indices[((i - 1) * nt + 1):(i * nt)]])
t_slide[:, i] = t[((i - 1) * nt + 1):(i * nt)]
end
# Polynomial fit over a batch of sliding windows
Tfit = Int64(settings.Tfit)
Tfit = Int64(settings.Tfit) # (a multiple of Ts)
NT = Int64(N / Tfit)
a1 = zeros(NT)
a2 = zeros(NT)
t_start = zeros(NT)
y = zeros(Tfit, NT)
ty = zeros(Tfit, NT)
ym = zeros(NT)
for i in 1:NT
ind_low = (i - 1) * Tfit + 1
ind_high = i * Tfit
t_start[i] = t_slide[1, ind_low]
ty[:, i] = t_slide[1, ind_low:ind_high] .- t_start[i]
a1[i], a2[i] = regression(ty[:, i], var_slide[ind_low:ind_high])
y[:, i] = a1[i] .* ty[:, i] .+ a2[i]
ym[i] = mean(var_slide[ind_low:ind_high])
end
# Combine
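
For context, a small self-contained sketch of the in-place pattern adopted in `GModel_common.jl` above (illustrative only, with assumed sizes, and with the periodic boundary handled by `mod1` rather than explicit edge cases): the right-hand side writes into a preallocated slice through `view`, and the RK4 step overwrites the state vector instead of allocating a new one.

```julia
# Lorenz 96 right-hand side, writing dx/dt into `dx` in place
function lorenz96!(dx, x, F)
    N = length(x)
    for i in 1:N
        dx[i] = (x[mod1(i + 1, N)] - x[mod1(i - 2, N)]) * x[mod1(i - 1, N)] - x[i] + F
    end
    return nothing
end

# One in-place RK4 step; `k` is a preallocated 4 x N buffer filled via views
function rk4_step!(x, dt, F, k)
    lorenz96!(view(k, 1, :), x, F)
    lorenz96!(view(k, 2, :), x .+ (dt / 2) .* k[1, :], F)
    lorenz96!(view(k, 3, :), x .+ (dt / 2) .* k[2, :], F)
    lorenz96!(view(k, 4, :), x .+ dt .* k[3, :], F)
    x .+= (dt / 6) .* (k[1, :] .+ 2 .* k[2, :] .+ 2 .* k[3, :] .+ k[4, :])
    return nothing
end

# Usage with assumed sizes: 40 state variables, forcing 8.0, step 0.01
N, F, dt = 40, 8.0, 0.01
x = F .+ 0.01 .* randn(N)   # small random perturbation about the forcing
k = zeros(4, N)
for _ in 1:1000
    rk4_step!(x, dt, F, k)
end
```

The `Xbuffer` and `RK4!`/`f!` changes in the diff follow the same shape, just with the example's explicit boundary handling and signatures.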
(The diffs for the remaining 8 changed files are not shown.)
