219 changes: 110 additions & 109 deletions pymc3/distributions/continuous.py

Large diffs are not rendered by default.

68 changes: 35 additions & 33 deletions pymc3/distributions/discrete.py
@@ -4,6 +4,7 @@
 import theano.tensor as tt
 from scipy import stats
 
+from pymc3.theanof import floatX, intX
 from pymc3.util import get_variable_name
 from .dist_math import bound, factln, binomln, betaln, logpow
 from .distribution import Discrete, draw_values, generate_samples, reshape_sampled
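
For readers of this diff: floatX has long lived in pymc3.theanof and casts its argument to theano.config.floatX; intX is its integer counterpart used alongside it here. A rough sketch of their behavior, inferred from how they are used below (the exact integer width is an assumption, not taken from this PR):

import numpy as np
import theano

def floatX(X):
    # Cast a Theano tensor or array-like to theano.config.floatX
    # ('float64' by default, 'float32' in GPU-oriented setups).
    try:
        return X.astype(theano.config.floatX)
    except AttributeError:
        return np.asarray(X, dtype=theano.config.floatX)

def intX(X):
    # Integer analogue; 'int32' mirrors the .astype('int32') calls
    # this diff replaces, but treat the width as an assumption.
    try:
        return X.astype('int32')
    except AttributeError:
        return np.asarray(X, dtype='int32')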
@@ -42,9 +43,9 @@ class Binomial(Discrete):
 
     def __init__(self, n, p, *args, **kwargs):
         super(Binomial, self).__init__(*args, **kwargs)
-        self.n = n = tt.as_tensor_variable(n)
-        self.p = p = tt.as_tensor_variable(p)
-        self.mode = tt.cast(tround(n * p), self.dtype)
+        self.n = intX(n)
+        self.p = floatX(p)
+        self.mode = tt.cast(tround(self.n * self.p), self.dtype)
 
     def random(self, point=None, size=None, repeat=None):
         n, p = draw_values([self.n, self.p], point=point)
@@ -101,10 +102,11 @@ class BetaBinomial(Discrete):
 
     def __init__(self, alpha, beta, n, *args, **kwargs):
         super(BetaBinomial, self).__init__(*args, **kwargs)
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
-        self.beta = beta = tt.as_tensor_variable(beta)
-        self.n = n = tt.as_tensor_variable(n)
-        self.mode = tt.cast(tround(alpha / (alpha + beta)), 'int8')
+        self.alpha = floatX(alpha)
+        self.beta = floatX(beta)
+        self.n = intX(n)
+        self.mode = tt.cast(
+            tround(self.alpha / (self.alpha + self.beta)), 'int8')
 
     def _random(self, alpha, beta, n, size=None):
         size = size or 1
@@ -166,8 +168,8 @@ class Bernoulli(Discrete):
 
     def __init__(self, p, *args, **kwargs):
         super(Bernoulli, self).__init__(*args, **kwargs)
-        self.p = p = tt.as_tensor_variable(p)
-        self.mode = tt.cast(tround(p), 'int8')
+        self.p = floatX(p)
+        self.mode = tt.cast(tround(self.p), 'int8')
 
     def random(self, point=None, size=None, repeat=None):
         p = draw_values([self.p], point=point)
@@ -207,8 +209,8 @@ class DiscreteWeibull(Discrete):
     def __init__(self, q, beta, *args, **kwargs):
         super(DiscreteWeibull, self).__init__(*args, defaults=['median'], **kwargs)
 
-        self.q = q = tt.as_tensor_variable(q)
-        self.beta = beta = tt.as_tensor_variable(beta)
+        self.q = floatX(q)
+        self.beta = floatX(beta)
 
         self.median = self._ppf(0.5)
 
@@ -282,8 +284,8 @@ class Poisson(Discrete):
 
     def __init__(self, mu, *args, **kwargs):
         super(Poisson, self).__init__(*args, **kwargs)
-        self.mu = mu = tt.as_tensor_variable(mu)
-        self.mode = tt.floor(mu).astype('int32')
+        self.mu = floatX(mu)
+        self.mode = tt.floor(intX(self.mu))
 
     def random(self, point=None, size=None, repeat=None):
         mu = draw_values([self.mu], point=point)
@@ -336,9 +338,9 @@ class NegativeBinomial(Discrete):
 
     def __init__(self, mu, alpha, *args, **kwargs):
         super(NegativeBinomial, self).__init__(*args, **kwargs)
-        self.mu = mu = tt.as_tensor_variable(mu)
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
-        self.mode = tt.floor(mu).astype('int32')
+        self.mu = floatX(mu)
+        self.alpha = floatX(alpha)
+        self.mode = tt.floor(intX(self.mu))
 
     def random(self, point=None, size=None, repeat=None):
         mu, alpha = draw_values([self.mu, self.alpha], point=point)
@@ -394,7 +396,7 @@ class Geometric(Discrete):
 
     def __init__(self, p, *args, **kwargs):
         super(Geometric, self).__init__(*args, **kwargs)
-        self.p = p = tt.as_tensor_variable(p)
+        self.p = floatX(p)
         self.mode = 1
 
     def random(self, point=None, size=None, repeat=None):
@@ -438,17 +440,17 @@ class DiscreteUniform(Discrete):
 
     def __init__(self, lower, upper, *args, **kwargs):
         super(DiscreteUniform, self).__init__(*args, **kwargs)
-        self.lower = tt.floor(lower).astype('int32')
-        self.upper = tt.floor(upper).astype('int32')
+        self.lower = intX(np.floor(lower))
+        self.upper = intX(np.floor(upper))
         self.mode = tt.maximum(
-            tt.floor((upper + lower) / 2.).astype('int32'), self.lower)
+            intX(np.floor((upper + lower) / 2.)), self.lower)
 
     def _random(self, lower, upper, size=None):
         # This way seems to be the only to deal with lower and upper
         # as array-like.
         samples = stats.uniform.rvs(lower, upper - lower - np.finfo(float).eps,
                                     size=size)
-        return np.floor(samples).astype('int32')
+        return intX(np.floor(samples))
 
     def random(self, point=None, size=None, repeat=None):
         lower, upper = draw_values([self.lower, self.upper], point=point)
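
A nuance worth flagging in the DiscreteUniform hunk above: tt.floor builds a symbolic op, while np.floor is evaluated eagerly when the distribution is constructed. A hedged reading (not documented behavior) is that lower and upper now need to be concrete numbers or arrays rather than Theano variables:

import numpy as np

# Plain numeric bounds are floored eagerly, once, at construction:
lower, upper = 0, 9
lower_int = intX(np.floor(lower))  # plain int array, computed now
upper_int = intX(np.floor(upper))

# Previously tt.floor(lower) would also have accepted a symbolic
# bound (e.g. another model variable); np.floor cannot do that.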
@@ -498,9 +500,9 @@ def __init__(self, p, *args, **kwargs):
             self.k = tt.shape(p)[-1].tag.test_value
         except AttributeError:
             self.k = tt.shape(p)[-1]
-        self.p = p = tt.as_tensor_variable(p)
+        p = floatX(p)
         self.p = (p.T / tt.sum(p, -1)).T
-        self.mode = tt.argmax(p)
+        self.mode = tt.argmax(self.p)
 
     def random(self, point=None, size=None, repeat=None):
         def random_choice(k, *args, **kwargs):

def __init__(self, c, *args, **kwargs):
super(Constant, self).__init__(*args, **kwargs)
self.mean = self.median = self.mode = self.c = c = tt.as_tensor_variable(c)
self.mean = self.median = self.mode = self.c = floatX(c)

def random(self, point=None, size=None, repeat=None):
c = draw_values([self.c], point=point)
Expand Down Expand Up @@ -616,8 +618,8 @@ class ZeroInflatedPoisson(Discrete):

def __init__(self, psi, theta, *args, **kwargs):
super(ZeroInflatedPoisson, self).__init__(*args, **kwargs)
self.theta = theta = tt.as_tensor_variable(theta)
self.psi = psi = tt.as_tensor_variable(psi)
self.theta = floatX(theta)
self.psi = floatX(psi)
self.pois = Poisson.dist(theta)
self.mode = self.pois.mode

@@ -682,9 +684,9 @@ class ZeroInflatedBinomial(Discrete):
 
     def __init__(self, psi, n, p, *args, **kwargs):
         super(ZeroInflatedBinomial, self).__init__(*args, **kwargs)
-        self.n = n = tt.as_tensor_variable(n)
-        self.p = p = tt.as_tensor_variable(p)
-        self.psi = psi = tt.as_tensor_variable(psi)
+        self.n = n = intX(n)
+        self.p = p = floatX(p)
+        self.psi = psi = floatX(psi)
         self.bin = Binomial.dist(n, p)
         self.mode = self.bin.mode
 
@@ -756,10 +758,10 @@ class ZeroInflatedNegativeBinomial(Discrete):
 
     def __init__(self, psi, mu, alpha, *args, **kwargs):
         super(ZeroInflatedNegativeBinomial, self).__init__(*args, **kwargs)
-        self.mu = mu = tt.as_tensor_variable(mu)
-        self.alpha = alpha = tt.as_tensor_variable(alpha)
-        self.psi = psi = tt.as_tensor_variable(psi)
-        self.nb = NegativeBinomial.dist(mu, alpha)
+        self.mu = floatX(mu)
+        self.alpha = floatX(alpha)
+        self.psi = floatX(psi)
+        self.nb = NegativeBinomial.dist(self.mu, self.alpha)
         self.mode = self.nb.mode
 
     def random(self, point=None, size=None, repeat=None):
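Taken together, the discrete.py changes route every parameter through floatX/intX so dtypes track theano.config.floatX instead of hard-coded float64 tensors. A minimal smoke test one might run against this branch (the float32 setting and the attribute access are illustrative assumptions):

import theano
theano.config.floatX = 'float32'  # emulate a GPU-style configuration

import pymc3 as pm

with pm.Model():
    x = pm.Binomial('x', n=10, p=0.3)

# With the casts above, the stored parameter should follow floatX:
print(x.distribution.p.dtype)     # expected 'float32', not 'float64'
print(x.distribution.mode.dtype)  # an integer dtype via self.dtype
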
9 changes: 5 additions & 4 deletions pymc3/distributions/dist_math.py
@@ -12,7 +12,7 @@
 
 from .special import gammaln
 from ..math import logdet as _logdet
-from pymc3.theanof import floatX
+from pymc3.theanof import floatX, intX
 
 f = floatX
 c = - .5 * np.log(2. * np.pi)
@@ -44,7 +44,7 @@ def bound(logp, *conditions, **kwargs):
     else:
         alltrue = alltrue_scalar
 
-    return tt.switch(alltrue(conditions), logp, -np.inf)
+    return tt.switch(alltrue(conditions), logp, -f(np.inf))
 
 
 def alltrue_elemwise(vals):
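
The -np.inf to -f(np.inf) change is not cosmetic: tt.switch promotes its output to the widest input dtype, and a bare np.inf is a float64 scalar, so every bounded float32 logp was silently upcast to float64. A small sketch of the effect (dtypes assume a float32 floatX configuration):

import numpy as np
import theano
import theano.tensor as tt

theano.config.floatX = 'float32'

logp = tt.fscalar('logp')  # a float32 log-probability
cond = tt.bscalar('cond')

bad = tt.switch(cond, logp, -np.inf)               # np.inf is float64
good = tt.switch(cond, logp, -np.float32(np.inf))  # cast it first

print(bad.dtype)   # 'float64' -- the whole graph got promoted
print(good.dtype)  # 'float32'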
@@ -62,12 +62,13 @@ def logpow(x, m):
     """
     Calculates log(x**m) since m*log(x) will fail when m, x = 0.
     """
+    m = f(m)
     # return m * log(x)
     return tt.switch(tt.eq(x, 0), -np.inf, m * tt.log(x))
 
 
 def factln(n):
-    return gammaln(n + 1)
+    return gammaln(n + intX(1))
 
 
 def binomln(n, k):
@@ -256,7 +257,7 @@ def MvNormalLogp():
     result += f(2) * n * tt.sum(tt.log(diag))
     result += (delta_trans ** f(2)).sum()
     result = f(-.5) * result
-    logp = tt.switch(ok, result, -np.inf)
+    logp = tt.switch(ok, result, -f(np.inf))
 
     def dlogp(inputs, gradients):
         g_logp, = gradients
13 changes: 7 additions & 6 deletions pymc3/distributions/mixture.py
@@ -1,6 +1,7 @@
 import numpy as np
 import theano.tensor as tt
 
+from pymc3.theanof import floatX
 from pymc3.util import get_variable_name
 from ..math import logsumexp
 from .dist_math import bound
@@ -42,7 +43,7 @@ class Mixture(Distribution):
     def __init__(self, w, comp_dists, *args, **kwargs):
         shape = kwargs.pop('shape', ())
 
-        self.w = w = tt.as_tensor_variable(w)
+        self.w = floatX(w)
         self.comp_dists = comp_dists
 
         defaults = kwargs.pop('defaults', [])
@@ -53,7 +54,7 @@ def __init__(self, w, comp_dists, *args, **kwargs):
         dtype = kwargs.pop('dtype', 'float64')
 
         try:
-            self.mean = (w * self._comp_means()).sum(axis=-1)
+            self.mean = (self.w * self._comp_means()).sum(axis=-1)
 
             if 'mean' not in defaults:
                 defaults.append('mean')
@@ -63,7 +64,7 @@
         try:
             comp_modes = self._comp_modes()
             comp_mode_logps = self.logp(comp_modes)
-            self.mode = comp_modes[tt.argmax(w * comp_mode_logps, axis=-1)]
+            self.mode = comp_modes[tt.argmax(self.w * comp_mode_logps, axis=-1)]
 
             if 'mode' not in defaults:
                 defaults.append('mode')
@@ -169,9 +170,9 @@ class NormalMixture(Mixture):
     def __init__(self, w, mu, *args, **kwargs):
         _, sd = get_tau_sd(tau=kwargs.pop('tau', None),
                            sd=kwargs.pop('sd', None))
-        self.mu = mu = tt.as_tensor_variable(mu)
-        self.sd = sd = tt.as_tensor_variable(sd)
-        super(NormalMixture, self).__init__(w, Normal.dist(mu, sd=sd),
+        self.mu = floatX(mu)
+        self.sd = floatX(sd)
+        super(NormalMixture, self).__init__(w, Normal.dist(self.mu, sd=self.sd),
                                             *args, **kwargs)
 
     def _repr_latex_(self, name=None, dist=None):
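Since NormalMixture now routes mu and sd through floatX as well, a quick construction check could look like this (weights and component parameters are illustrative values only):

import numpy as np
import pymc3 as pm

with pm.Model():
    w = np.array([0.3, 0.7])    # mixture weights, sum to 1
    mu = np.array([-1., 1.])    # component means
    sd = np.array([1., 1.])     # component standard deviations
    y = pm.NormalMixture('y', w=w, mu=mu, sd=sd)

# mu/sd stored on the distribution should share theano.config.floatX
print(y.distribution.mu.dtype)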