update apis and tests #246

Merged · 1 commit · Aug 11, 2022
1 change: 0 additions & 1 deletion brainpy/base/base.py
@@ -124,7 +124,6 @@ def vars(self, method='absolute', level=-1, include_self=True):
         v = getattr(node, k)
         if isinstance(v, math.Variable):
           if k not in node._excluded_vars:
-            # if not k.startswith('_') and not k.endswith('_'):
             gather[f'{node_path}.{k}' if node_path else k] = v
       gather.update({f'{node_path}.{k}': v for k, v in node.implicit_vars.items()})
     return gather
2 changes: 2 additions & 0 deletions brainpy/base/tests/test_collector.py
@@ -273,6 +273,8 @@ def test_net_vars_2():

 def test_hidden_variables():
   class BPClass(bp.base.Base):
+    _excluded_vars = ('_rng_', )
+
     def __init__(self):
       super(BPClass, self).__init__()
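Note: together, these two changes let a subclass hide internal state from variable collection: any attribute name listed in `_excluded_vars` is skipped when `Base.vars()` gathers `math.Variable` instances. A minimal sketch of that behavior, assuming the brainpy API as shown in this diff (the class name `HiddenVarModel` is hypothetical):

import brainpy as bp
import brainpy.math as bm

class HiddenVarModel(bp.base.Base):
  # Names listed here are skipped by Base.vars() (see the base.py change above).
  _excluded_vars = ('_rng_',)

  def __init__(self):
    super(HiddenVarModel, self).__init__()
    self._rng_ = bm.Variable(bm.zeros(1))  # excluded from collection
    self.w = bm.Variable(bm.ones(3))       # collected as usual

model = HiddenVarModel()
collected = [k.split('.')[-1] for k in model.vars().keys()]
assert 'w' in collected and '_rng_' not in collected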
80 changes: 49 additions & 31 deletions brainpy/dyn/neurons/biological_models.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-

-from typing import Union, Callable
+from typing import Union, Callable, Optional

 import brainpy.math as bm
 from brainpy.dyn.base import NeuGroup
@@ -204,9 +204,9 @@ def __init__(
       V_th: Union[float, Tensor, Initializer, Callable] = 20.,
       C: Union[float, Tensor, Initializer, Callable] = 1.0,
       V_initializer: Union[Initializer, Callable, Tensor] = Uniform(-70, -60.),
-      m_initializer: Union[Initializer, Callable, Tensor] = OneInit(0.5),
-      h_initializer: Union[Initializer, Callable, Tensor] = OneInit(0.6),
-      n_initializer: Union[Initializer, Callable, Tensor] = OneInit(0.32),
+      m_initializer: Optional[Union[Initializer, Callable, Tensor]] = None,
+      h_initializer: Optional[Union[Initializer, Callable, Tensor]] = None,
+      n_initializer: Optional[Union[Initializer, Callable, Tensor]] = None,
       noise: Union[float, Tensor, Initializer, Callable] = None,
       method: str = 'exp_auto',
       name: str = None,
@@ -233,20 +233,29 @@ def __init__(
     self.noise = init_noise(noise, self.varshape, num_vars=4)

     # initializers
-    check_initializer(m_initializer, 'm_initializer', allow_none=False)
-    check_initializer(h_initializer, 'h_initializer', allow_none=False)
-    check_initializer(n_initializer, 'n_initializer', allow_none=False)
+    check_initializer(m_initializer, 'm_initializer', allow_none=True)
+    check_initializer(h_initializer, 'h_initializer', allow_none=True)
+    check_initializer(n_initializer, 'n_initializer', allow_none=True)
     check_initializer(V_initializer, 'V_initializer', allow_none=False)
     self._m_initializer = m_initializer
     self._h_initializer = h_initializer
     self._n_initializer = n_initializer
     self._V_initializer = V_initializer

     # variables
-    self.m = variable(self._m_initializer, mode, self.varshape)
-    self.h = variable(self._h_initializer, mode, self.varshape)
-    self.n = variable(self._n_initializer, mode, self.varshape)
     self.V = variable(self._V_initializer, mode, self.varshape)
+    if self._m_initializer is None:
+      self.m = bm.Variable(self.m_inf(self.V.value))
+    else:
+      self.m = variable(self._m_initializer, mode, self.varshape)
+    if self._h_initializer is None:
+      self.h = bm.Variable(self.h_inf(self.V.value))
+    else:
+      self.h = variable(self._h_initializer, mode, self.varshape)
+    if self._n_initializer is None:
+      self.n = bm.Variable(self.n_inf(self.V.value))
+    else:
+      self.n = variable(self._n_initializer, mode, self.varshape)
     self.input = variable(bm.zeros, mode, self.varshape)
     self.spike = variable(lambda s: bm.zeros(s, dtype=bool), mode, self.varshape)
@@ -256,32 +265,41 @@ def __init__(
     else:
       self.integral = sdeint(method=method, f=self.derivative, g=self.noise)

+  # m channel
+  m_alpha = lambda self, V: 0.1 * (V + 40) / (1 - bm.exp(-(V + 40) / 10))
+  m_beta = lambda self, V: 4.0 * bm.exp(-(V + 65) / 18)
+  m_inf = lambda self, V: self.m_alpha(V) / (self.m_alpha(V) + self.m_beta(V))
+  dm = lambda self, m, t, V: self.m_alpha(V) * (1 - m) - self.m_beta(V) * m
+
+  # h channel
+  h_alpha = lambda self, V: 0.07 * bm.exp(-(V + 65) / 20.)
+  h_beta = lambda self, V: 1 / (1 + bm.exp(-(V + 35) / 10))
+  h_inf = lambda self, V: self.h_alpha(V) / (self.h_alpha(V) + self.h_beta(V))
+  dh = lambda self, h, t, V: self.h_alpha(V) * (1 - h) - self.h_beta(V) * h
+
+  # n channel
+  n_alpha = lambda self, V: 0.01 * (V + 55) / (1 - bm.exp(-(V + 55) / 10))
+  n_beta = lambda self, V: 0.125 * bm.exp(-(V + 65) / 80)
+  n_inf = lambda self, V: self.n_alpha(V) / (self.n_alpha(V) + self.n_beta(V))
+  dn = lambda self, n, t, V: self.n_alpha(V) * (1 - n) - self.n_beta(V) * n
+
   def reset_state(self, batch_size=None):
-    self.m.value = variable(self._m_initializer, batch_size, self.varshape)
-    self.h.value = variable(self._h_initializer, batch_size, self.varshape)
-    self.n.value = variable(self._n_initializer, batch_size, self.varshape)
     self.V.value = variable(self._V_initializer, batch_size, self.varshape)
+    if self._m_initializer is None:
+      self.m.value = self.m_inf(self.V.value)
+    else:
+      self.m.value = variable(self._m_initializer, batch_size, self.varshape)
+    if self._h_initializer is None:
+      self.h.value = self.h_inf(self.V.value)
+    else:
+      self.h.value = variable(self._h_initializer, batch_size, self.varshape)
+    if self._n_initializer is None:
+      self.n.value = self.n_inf(self.V.value)
+    else:
+      self.n.value = variable(self._n_initializer, batch_size, self.varshape)
     self.input.value = variable(bm.zeros, batch_size, self.varshape)
     self.spike.value = variable(lambda s: bm.zeros(s, dtype=bool), batch_size, self.varshape)

-  def dm(self, m, t, V):
-    alpha = 0.1 * (V + 40) / (1 - bm.exp(-(V + 40) / 10))
-    beta = 4.0 * bm.exp(-(V + 65) / 18)
-    dmdt = alpha * (1 - m) - beta * m
-    return dmdt
-
-  def dh(self, h, t, V):
-    alpha = 0.07 * bm.exp(-(V + 65) / 20.)
-    beta = 1 / (1 + bm.exp(-(V + 35) / 10))
-    dhdt = alpha * (1 - h) - beta * h
-    return dhdt
-
-  def dn(self, n, t, V):
-    alpha = 0.01 * (V + 55) / (1 - bm.exp(-(V + 55) / 10))
-    beta = 0.125 * bm.exp(-(V + 65) / 80)
-    dndt = alpha * (1 - n) - beta * n
-    return dndt
-
   def dV(self, V, t, m, h, n, I_ext):
     I_Na = (self.gNa * m ** 3.0 * h) * (V - self.ENa)
     I_K = (self.gK * n ** 4.0) * (V - self.EK)
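Note: the net effect of this file's changes is that `m_initializer`, `h_initializer`, and `n_initializer` now default to `None`, in which case the gating variables are initialized (and reset) to their voltage-dependent steady states, e.g. m_inf(V) = m_alpha(V) / (m_alpha(V) + m_beta(V)), rather than the previous fixed OneInit constants. A minimal usage sketch, assuming this HH model is exported as `bp.dyn.HH` and the initializers as `bp.init.*`:

import brainpy as bp
import brainpy.math as bm

# Default: gating variables m, h, n start at their steady states for the initial V.
hh = bp.dyn.HH(size=10)

# Hand computation of the same steady state for the m gate:
V = hh.V.value
alpha = 0.1 * (V + 40) / (1 - bm.exp(-(V + 40) / 10))
beta = 4.0 * bm.exp(-(V + 65) / 18)
m_inf = alpha / (alpha + beta)  # should match hh.m.value

# Passing an explicit initializer restores the old fixed-value behavior:
hh_fixed = bp.dyn.HH(size=10, m_initializer=bp.init.OneInit(0.5))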
5 changes: 5 additions & 0 deletions brainpy/visualization/base.py
@@ -93,6 +93,11 @@ def animate_2D(values,
                       frame_delay=frame_delay, frame_step=frame_step, title_size=title_size,
                       figsize=figsize, gif_dpi=gif_dpi, video_fps=video_fps, save_path=save_path, show=show)

+  @staticmethod
+  def remove_axis(ax, *pos):
+    from .plots import remove_axis
+    return remove_axis(ax, *pos)
+
   @staticmethod
   def plot_style1(fontsize=22,
                   axes_edgecolor='black',
10 changes: 10 additions & 0 deletions brainpy/visualization/plots.py
@@ -17,6 +17,7 @@
   'raster_plot',
   'animate_2D',
   'animate_1D',
+  'remove_axis',
 ]

@@ -504,3 +505,12 @@ def frame(t):
   else:
     anim_result.save(save_path + '.mp4', writer='ffmpeg', fps=video_fps, bitrate=3000)
   return fig
+
+
+def remove_axis(ax, *pos):
+  for p in pos:
+    if p not in ['left', 'right', 'top', 'bottom']:
+      raise ValueError(f"Unknown position: {p}. Expected 'left', 'right', 'top' or 'bottom'.")
+    ax.spines[p].set_visible(False)
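Note: a minimal usage sketch for the new helper, assuming it is importable as `bp.visualize.remove_axis` after the `__all__` update above:

import matplotlib.pyplot as plt
import brainpy as bp

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])

# Hide the top and right spines, a common cleanup for publication figures.
bp.visualize.remove_axis(ax, 'top', 'right')
plt.show()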