Add Differential Equation API #3590

Merged: 21 commits, Sep 3, 2019. Showing changes from 16 commits.
1 change: 1 addition & 0 deletions RELEASE-NOTES.md
@@ -9,6 +9,7 @@
- Added `Matern12` covariance function for Gaussian processes. This is the Matern kernel with nu=1/2.
- Progressbar reports number of divergences in real time, when available [#3547](https://github.com/pymc-devs/pymc3/pull/3547).
- Sampling from variational approximation now allows for alternative trace backends [#3550].
- Added `DifferentialEquation`, enabling inference on the parameters of a differential equation.

### Maintenance
- Moved math operations out of `Rice`, `TruncatedNormal`, `Triangular` and `ZeroInflatedNegativeBinomial` `random` methods. Math operations on values returned by `draw_values` might not broadcast well, and all the `size` aware broadcasting is left to `generate_samples`. Fixes [#3481](https://github.com/pymc-devs/pymc3/issues/3481) and [#3508](https://github.com/pymc-devs/pymc3/issues/3508)
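For context, here is a minimal sketch of how the new Op might be used inside a PyMC3 model, based on the `DifferentialEquation` signature introduced in this PR. The observed data `yobs`, the priors, and the logistic ODE are illustrative assumptions, not part of the diff:

```python
import numpy as np
import pymc3 as pm
from pymc3.ode import DifferentialEquation


def odefunc(y, t, p):
    # Logistic growth: dy/dt = p[0] * y * (1 - y)
    return p[0] * y[0] * (1 - y[0])


times = np.arange(0.5, 5, 0.5)
ode_model = DifferentialEquation(func=odefunc, t0=0, times=times, n_states=1, n_odeparams=1)

# yobs: hypothetical noisy observations of the solution at `times`
yobs = np.array([0.18, 0.24, 0.32, 0.42, 0.52, 0.62, 0.71, 0.79, 0.85])

with pm.Model():
    alpha = pm.Lognormal("alpha", 0, 1)   # ODE parameter p[0]
    y0 = pm.Lognormal("y0", 0, 1)         # initial condition
    sigma = pm.HalfCauchy("sigma", 1)     # observation noise
    # The Op takes the ODE parameters and initial conditions and
    # returns the (flattened) numerical solution at `times`.
    mu = ode_model(odeparams=[alpha], y0=[y0])
    pm.Lognormal("Y", mu=pm.math.log(mu), sd=sigma, observed=yobs)
    trace = pm.sample(1000, tune=1000)
```

The Op returns the solution evaluated at `times`, so the observed data must align with those time points.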
570 changes: 570 additions & 0 deletions docs/source/notebooks/ODE_API_parameter_estimation.ipynb

Large diffs are not rendered by default.

296 changes: 99 additions & 197 deletions docs/source/notebooks/ODE_parameter_estimation.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion docs/source/notebooks/table_of_contents_examples.js
@@ -53,5 +53,5 @@ Gallery.contents = {
"normalizing_flows_overview": "Variational Inference",
"gaussian-mixture-model-advi": "Variational Inference",
"GLM-hierarchical-advi-minibatch": "Variational Inference",
"ODE_parameter_estimation": "Inference in ODE models"
"ODE_API_parameter_estimation": "Inference in ODE models"
}
1 change: 1 addition & 0 deletions pymc3/__init__.py
@@ -8,6 +8,7 @@
from .math import logaddexp, logsumexp, logit, invlogit, expand_packed_triangular, probit, invprobit
from .model import *
from .model_graph import model_to_graphviz
from . import ode
from .stats import *
from .sampling import *
from .step_methods import *
2 changes: 2 additions & 0 deletions pymc3/ode/__init__.py
@@ -0,0 +1,2 @@
from . import utils
from .ode import DifferentialEquation
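With the `from . import ode` line added to `pymc3/__init__.py` above and the re-export in `pymc3/ode/__init__.py`, the new class should be reachable either through the package namespace or by a direct submodule import; a small illustration:

```python
# Either through the pymc3 namespace ...
import pymc3 as pm
OdeOp = pm.ode.DifferentialEquation  # exposed via `from . import ode`

# ... or directly from the submodule.
from pymc3.ode import DifferentialEquation
```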
188 changes: 188 additions & 0 deletions pymc3/ode/ode.py
@@ -0,0 +1,188 @@
import numpy as np
import scipy.integrate
import theano
import theano.tensor as tt
from ..ode.utils import augment_system, ODEGradop


class DifferentialEquation(theano.Op):
"""
Specify an ordinary differential equation

.. math::
\dfrac{dy}{dt} = f(y,t,p) \quad y(t_0) = y_0

Parameters
----------

func : callable
Function specifying the differential equation
t0 : float
Time corresponding to the initial condition
times : array
Array of times at which to evaluate the solution of the differential equation.
n_states : int
Dimension of the differential equation. For scalar differential equations, n_states=1.
For vector valued differential equations, n_states = number of differential equations in the system.
n_odeparams : int
Number of parameters in the differential equation.

.. code-block:: python

def odefunc(y, t, p):
#Logistic differential equation
return p[0]*y[0]*(1-y[0])

times=np.arange(0.5, 5, 0.5)

ode_model = DifferentialEquation(func=odefunc, t0=0, times=times, n_states=1, n_odeparams=1)
"""

__props__ = ("func", "t0", "times", "n_states", "n_odeparams")

def __init__(self, func, times, n_states, n_odeparams, t0=0):
if not callable(func):
raise ValueError("Argument func must be callable.")
if n_states < 1:
raise ValueError("Argument n_states must be at least 1.")
if n_odeparams <= 0:
raise ValueError("Argument n_odeparams must be positive.")

# Public
self.func = func
self.t0 = t0
self.times = tuple(times)
self.n_states = n_states
self.n_odeparams = n_odeparams

# Private
self._n = n_states
self._m = n_odeparams + n_states

        self._augmented_times = np.insert(times, 0, t0)
self._augmented_func = augment_system(func, self._n, self._m)
self._sens_ic = self._make_sens_ic()

self._cached_y = None
self._cached_sens = None
self._cached_parameters = None

self._grad_op = ODEGradop(self._numpy_vsp)

def _make_sens_ic(self):
"""The sensitivity matrix will always have consistent form.
If the first n_odeparams entries of the parameters vector in the simulate call
        correspond to ode parameters, then the first n_odeparams columns in
the sensitivity matrix will be 0

        If the last n_states entries of the parameters vector in the simulate call
correspond to initial conditions of the system,
then the last n_states columns of the sensitivity matrix should form
an identity matrix
"""

# Initialize the sensitivity matrix to be 0 everywhere
sens_matrix = np.zeros((self._n, self._m))

        # Slip in the identity matrix in the appropriate place
sens_matrix[:, -self.n_states :] = np.eye(self.n_states)

        # We need the sensitivity matrix to be a vector (see augment_system)
# Ravel and return
dydp = sens_matrix.ravel()

return dydp

def _system(self, Y, t, p):
"""This is the function that will be passed to odeint.
Solves both ODE and sensitivities
Args:
Y (vector): current state and current gradient state
t (scalar): current time
p (vector): parameters
Returns:
derivatives (vector): derivatives of state and gradient
"""

dydt, ddt_dydp = self._augmented_func(Y[: self._n], t, p, Y[self._n :])
derivatives = np.concatenate([dydt, ddt_dydp])
return derivatives

def _simulate(self, parameters):
# Initial condition comprised of state initial conditions and raveled
# sensitivity matrix
y0 = np.concatenate([parameters[self.n_odeparams :], self._sens_ic])

# perform the integration
sol = scipy.integrate.odeint(
func=self._system, y0=y0, t=self._augmented_times, args=(parameters,)
)
# The solution
y = sol[1:, : self.n_states]

# The sensitivities, reshaped to be a sequence of matrices
sens = sol[1:, self.n_states :].reshape(len(self.times), self._n, self._m)

return y, sens

def _cached_simulate(self, parameters):
if np.array_equal(np.array(parameters), self._cached_parameters):

return self._cached_y, self._cached_sens

return self._simulate(np.array(parameters))

def _state(self, parameters):
y, sens = self._cached_simulate(np.array(parameters))
self._cached_y, self._cached_sens, self._cached_parameters = y, sens, parameters
return y.ravel()

def _numpy_vsp(self, parameters, g):
_, sens = self._cached_simulate(np.array(parameters))

# Each element of sens is an nxm sensitivity matrix
        # There is one sensitivity matrix per time step, making sens a (len(times), n_states, len(parameters))
# dimensional array. Reshaping the sens array in this way is like stacking each of the elements of sens on top
# of one another.
numpy_sens = sens.reshape((self.n_states * len(self.times), len(parameters)))
        # The dot product here is equivalent to np.einsum('ijk,ij->k', sens, g)
        # if sens were not reshaped and g were reshaped to (len(times), n_states), i.e. the shape of yobs
return numpy_sens.T.dot(g)

def make_node(self, odeparams, y0):
if len(odeparams) != self.n_odeparams:
raise ValueError(
"odeparams has too many or too few parameters. Expected {a} parameter(s) but got {b}".format(
a=self.n_odeparams, b=len(odeparams)
)
)
if len(y0) != self.n_states:
raise ValueError(
"y0 has too many or too few parameters. Expected {a} parameter(s) but got {b}".format(
a=self.n_states, b=len(y0)
)
)

if np.ndim(odeparams) > 1:
odeparams = np.ravel(odeparams)
if np.ndim(y0) > 1:
y0 = np.ravel(y0)

odeparams = tt.as_tensor_variable(odeparams)
y0 = tt.as_tensor_variable(y0)
parameters = tt.concatenate([odeparams, y0])
return theano.Apply(self, [parameters], [parameters.type()])

def perform(self, node, inputs_storage, output_storage):
parameters = inputs_storage[0]
out = output_storage[0]
# get the numerical solution of ODE states
out[0] = self._state(parameters)

def grad(self, inputs, output_grads):
x = inputs[0]
g = output_grads[0]
# pass the VSP when asked for gradient
grad_op_apply = self._grad_op(x, g)

return [grad_op_apply]
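To illustrate the reshaping logic in `_numpy_vsp`, here is a small self-contained numpy check (with made-up shapes and random data, not taken from the PR) showing that stacking the per-time-step sensitivity matrices and taking one dot product gives the same vector-Jacobian product as an explicit einsum over the unreshaped array:

```python
import numpy as np

T, n_states, n_params = 5, 2, 3  # hypothetical sizes
rng = np.random.default_rng(0)

# sens[i, j, k] = d y_j(t_i) / d p_k, one sensitivity matrix per time step
sens = rng.normal(size=(T, n_states, n_params))
# g is the gradient w.r.t. the Op's flattened output, shape (T * n_states,)
g = rng.normal(size=T * n_states)

# What _numpy_vsp does: stack the sensitivity matrices, then one dot product
vjp_reshaped = sens.reshape(T * n_states, n_params).T.dot(g)

# Equivalent einsum on the unreshaped array, with g reshaped like yobs
vjp_einsum = np.einsum("ijk,ij->k", sens, g.reshape(T, n_states))

assert np.allclose(vjp_reshaped, vjp_einsum)
```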
79 changes: 79 additions & 0 deletions pymc3/ode/utils.py
@@ -0,0 +1,79 @@
import numpy as np
import theano
import theano.tensor as tt


def augment_system(ode_func, n, m):
"""Function to create augmented system.

Take a function which specifies a set of differential equations and return
a compiled function which allows for computation of gradients of the
    differential equation's solution with respect to the parameters.

Args:
ode_func (function): Differential equation. Returns array-like
n: Number of rows of the sensitivity matrix
m: Number of columns of the sensitivity matrix

Returns:
        system (function): Augmented system of differential equations.

"""

# Present state of the system
t_y = tt.vector("y", dtype=theano.config.floatX)
t_y.tag.test_value = np.zeros((n,))
    # Parameter(s). Should be a vector to allow for generalization to multiparameter
    # systems of ODEs. Is m dimensional because it includes all ode parameters as well as initial conditions
t_p = tt.vector("p", dtype=theano.config.floatX)
t_p.tag.test_value = np.zeros((m,))
    # Time. Allow for non-autonomous systems of ODEs to be analyzed
t_t = tt.scalar("t", dtype=theano.config.floatX)
t_t.tag.test_value = 2.459

# Present state of the gradients:
    # Will always be 0 unless the parameter is the initial condition
# Entry i,j is partial of y[i] wrt to p[j]
dydp_vec = tt.vector("dydp", dtype=theano.config.floatX)
dydp_vec.tag.test_value = np.zeros(n * m)

dydp = dydp_vec.reshape((n, m))

# Stack the results of the ode_func
f_tensor = tt.stack(ode_func(t_y, t_t, t_p))

# Now compute gradients
J = tt.jacobian(f_tensor, t_y)

Jdfdy = tt.dot(J, dydp)

grad_f = tt.jacobian(f_tensor, t_p)

# This is the time derivative of dydp
ddt_dydp = (Jdfdy + grad_f).flatten()

system = theano.function(
inputs=[t_y, t_t, t_p, dydp_vec],
outputs=[f_tensor, ddt_dydp],
on_unused_input="ignore",
)

return system


class ODEGradop(theano.Op):
def __init__(self, numpy_vsp):
self._numpy_vsp = numpy_vsp

def make_node(self, x, g):

x = theano.tensor.as_tensor_variable(x)
g = theano.tensor.as_tensor_variable(g)
node = theano.Apply(self, [x, g], [g.type()])
return node

def perform(self, node, inputs_storage, output_storage):
x = inputs_storage[0]
g = inputs_storage[1]
out = output_storage[0]
out[0] = self._numpy_vsp(x, g) # get the numerical VSP
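As a quick sanity check of `augment_system`, the sketch below compiles the augmented system for the logistic equation from the `DifferentialEquation` docstring and evaluates it once. The augmented state follows the forward sensitivity equation d/dt (dy/dp) = J (dy/dp) + df/dp, which is exactly what `Jdfdy + grad_f` computes above; the numeric values here are arbitrary.

```python
import numpy as np
import theano
from pymc3.ode.utils import augment_system


def odefunc(y, t, p):
    # Logistic differential equation: dy/dt = p[0] * y * (1 - y)
    return p[0] * y[0] * (1 - y[0])


# One state, two sensitivity columns: one ODE parameter plus one initial condition
system = augment_system(odefunc, 1, 2)

floatX = theano.config.floatX
y = np.array([0.2], dtype=floatX)          # current state
t = np.asarray(1.0, dtype=floatX)          # current time (unused by this ODE)
p = np.array([1.5, 0.2], dtype=floatX)     # [ODE parameter, initial condition]
dydp = np.array([0.0, 1.0], dtype=floatX)  # raveled sensitivity ICs (identity block for y0)

f, ddt_dydp = system(y, t, p, dydp)
print(f)          # dy/dt at the current state
print(ddt_dydp)   # time derivative of the raveled sensitivity matrix
```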