diff --git a/docs/build/html/_modules/index.html b/docs/build/html/_modules/index.html index c0084d2..7547275 100644 --- a/docs/build/html/_modules/index.html +++ b/docs/build/html/_modules/index.html @@ -1,17 +1,20 @@ - +
+
import glob
import xml.etree.ElementTree as ET
import array
-from pathlib import Path
+from pathlib import Path
import numpy as np
-from .external import tifffile
+from .external import tifffile
package_directory = os.path.dirname(os.path.abspath(__file__))
@@ -57,21 +58,22 @@ Source code for utils2p.main
pass
+
def _node_crawler(node, *args):
if len(args) == 0:
return node
elif len(args) == 1 and args[0] in node.attrib.keys():
- return node.attrib[args[0]]
+ return node.attrib[args[0]]
if len(node) == 0:
- raise ValueError(f"Hit dead end {node} has no children.")
+ raise ValueError(f"Hit dead end {node} has no children.")
return [_node_crawler(child, *args[1:]) for child in node.findall(args[0])]
+
class _XMLFile:
"""
Base class for xml based Metadata.
"""
-
- def __init__(self, path):
+ def __init__(self, path):
self.path = path
self.tree = ET.parse(path)
self.root = self.tree.getroot()
@@ -106,23 +108,23 @@ Source code for utils2p.main
>>> type(metadata)
<class 'utils2p.main.Metadata'>
"""
-
[docs] def get_metadata_value(self, *args):
"""
This function returns a value from the metadata file 'Experiment.xml'.
-
+
Parameters
----------
args : strings
- Arbitrary number of strings of tag from the xml file in the correct order.
- See examples.
-
+ Arbitrary number of strings of tags from the xml file in the
+ correct order. See examples.
+
Returns
-------
attribute or node : string or ElementTree node
- If the number of strings given in args leads to a leaf of the tree, the attribute,
- usually a dictionary is returned. Otherwise the node is returned.
-
+ If the number of strings given in args leads to a leaf of the tree,
+ the attribute, usually a dictionary, is returned.
+ Otherwise the node is returned.
+
Examples
--------
>>> import utils2p
@@ -139,7 +141,7 @@ Source code for utils2p.main
[docs] def get_n_time_points(self):
"""
Returns the number of time points for a given experiment metadata.
-
+
Returns
-------
n_time_points : int
@@ -158,7 +160,7 @@ Source code for utils2p.main
"""
Returns the image width for a given experiment metadata,
i.e. the number of pixels in the x direction.
-
+
Returns
-------
width : int
@@ -177,7 +179,7 @@ Source code for utils2p.main
"""
Returns the image height for a given experiment metadata,
i.e. the number of pixels in the y direction.
-
+
Returns
-------
height : int
@@ -196,7 +198,7 @@ Source code for utils2p.main
"""
Returns the area mode of a given experiment metadata, e.g.
square, rectangle, line, kymograph.
-
+
Returns
-------
area_mode : string
@@ -212,21 +214,19 @@ Source code for utils2p.main
int_area_mode = int(self.get_metadata_value("LSM", "areaMode"))
if int_area_mode == 0:
return "square"
- elif int_area_mode == 1:
+ if int_area_mode == 1:
return "rectangle"
- elif int_area_mode == 2:
+ if int_area_mode == 2:
return "kymograph"
- elif int_area_mode == 3:
+ if int_area_mode == 3:
return "line"
- else:
- raise InvalidValueInMetaData(
- f"{int_area_mode} is not a valid value for areaMode."
- )
+ raise InvalidValueInMetaData(
+ f"{int_area_mode} is not a valid value for areaMode.")
[docs] def get_n_z(self):
"""
Returns the number for z slices for a given experiment metadata.
-
+
Returns
-------
n_z : int
@@ -262,7 +262,7 @@ Source code for utils2p.main
[docs] def get_n_channels(self):
"""
Returns the number of channels for a given experiment metadata.
-
+
Returns
-------
n_channels : int
@@ -279,8 +279,8 @@ Source code for utils2p.main
[docs] def get_channels(self):
"""
- Retruns a tuple with the names of all channels.
-
+ Returns a tuple with the names of all channels.
+
Returns
-------
channels : tuple of strings
@@ -299,7 +299,7 @@ Source code for utils2p.main
[docs] def get_pixel_size(self):
"""
Returns the pixel size for a given experiment metadata.
-
+
Returns
-------
pixel_size : float
@@ -317,7 +317,7 @@ Source code for utils2p.main
[docs] def get_z_step_size(self):
"""
Returns the z step size for a given experiment metadata.
-
+
Returns
-------
z_step_size : float
@@ -340,7 +340,7 @@ Source code for utils2p.main
equal to the step size, unless the number of pixels equals the number
of steps.
For all other types of recordings it is equivalent to :func:`get_z_step_size`.
-
+
Returns
-------
z_pixel_size : float
@@ -354,18 +354,15 @@ Source code for utils2p.main
15.0
"""
area_mode = self.get_area_mode()
- if area_mode == "line" or area_mode == "kymograph":
- return (
- float(self.get_metadata_value("ZStage", "stepSizeUM"))
- * self.get_n_z()
- / self.get_num_y_pixels()
- )
+ if area_mode in ('line', 'kymograph'):
+ return (float(self.get_metadata_value("ZStage", "stepSizeUM")) *
+ self.get_n_z() / self.get_num_y_pixels())
return float(self.get_metadata_value("ZStage", "stepSizeUM"))
[docs] def get_dwell_time(self):
"""
Returns the dwell time for a given experiment metadata.
-
+
Returns
-------
dwell_time : float
@@ -397,7 +394,7 @@ Source code for utils2p.main
Returns the frame rate for a given experiment metadata.
When the frame rate is calculated flyback frames and
steps in z are not considered frames.
-
+
Returns
-------
frame_rate : float
@@ -410,15 +407,17 @@ Source code for utils2p.main
>>> metadata.get_frame_rate()
10.0145
"""
- frame_rate_without_flybacks = float(self.get_metadata_value("LSM", "frameRate"))
+ frame_rate_without_flybacks = float(
+ self.get_metadata_value("LSM", "frameRate"))
flyback_frames = self.get_n_flyback_frames()
number_of_slices = self.get_n_z()
- return frame_rate_without_flybacks / (flyback_frames + number_of_slices)
+ return frame_rate_without_flybacks / (flyback_frames +
+ number_of_slices)
[docs] def get_width(self):
"""
- Returbns the image with in um for a given experiment metadata.
-
+ Returns the image width in um for a given experiment metadata.
+
Returns
-------
width : float
@@ -438,7 +437,7 @@ Source code for utils2p.main
Returns the starting position of power regulator 1 for a given
experiment metadata. Unless a gradient is defined, this
value is the power value for the entire experiment.
-
+
Returns
-------
reg1_start : float
@@ -453,10 +452,10 @@ Source code for utils2p.main
"""
return float(self.get_metadata_value("PowerRegulator", "start"))
-[docs] def get_gainA(self):
+[docs] def get_gain_a(self):
"""
Returns the gain of channel A for a given experiment metadata.
-
+
Returns
-------
gainA : int
@@ -466,15 +465,15 @@ Source code for utils2p.main
--------
>>> import utils2p
>>> metadata = Metadata('data/mouse_kidney_time_series_z_stack/Experiment.xml')
- >>> metadata.get_gainA()
+ >>> metadata.get_gain_a()
20.0
"""
return float(self.get_metadata_value("PMT", "gainA"))
-[docs] def get_gainB(self):
+[docs] def get_gain_b(self):
"""
Returns the gain of channel B for a given experiment metadata.
-
+
Returns
-------
gainB : int
@@ -484,15 +483,16 @@ Source code for utils2p.main
--------
>>> import utils2p
>>> metadata = Metadata('data/mouse_kidney_time_series_z_stack/Experiment.xml')
- >>> metadata.get_gainB()
+ >>> metadata.get_gain_b()
30.0
"""
return float(self.get_metadata_value("PMT", "gainB"))
[docs] def get_date_time(self):
"""
- Returns the date and time of an experiment for a given experiment metadata.
-
+ Returns the date and time of an experiment
+ for a given experiment metadata.
+
Returns
-------
date_time : string
@@ -511,7 +511,7 @@ Source code for utils2p.main
[docs]def load_img(path, memmap=False):
"""
This functions loads an image from file and returns as a numpy array.
-
+
Parameters
----------
path : string
@@ -525,8 +525,8 @@ Source code for utils2p.main
Returns
-------
- numpy.array
- Image in form of numpy array.
+ numpy.array or numpy.memmap
+ Image in form of numpy array or numpy memmap.
Examples
--------
@@ -548,7 +548,7 @@ Source code for utils2p.main
"""
This function loads a stack in several batches to make sure
the system does not run out of memory. It returns a generator
- the yields consecutive chunks of `batch_size` frames of the stack.
+ that yields consecutive chunks of `batch_size` frames of the stack.
The remaining memory is freed up by the function until the generator
is called again.
@@ -567,10 +567,11 @@ Source code for utils2p.main
"""
stack = load_img(path, memmap=True)
if stack.ndim < 3:
- raise ValueError(f"The path does not point to a stack. The shape is {stack.shape}.")
+ raise ValueError(
+ f"The path does not point to a stack. The shape is {stack.shape}.")
n_batches = int(stack.shape[0] / batch_size) + 1
for i in range(n_batches):
- substack = np.array(stack[i * batch_size : (i + 1) * batch_size])
+ substack = np.array(stack[i * batch_size:(i + 1) * batch_size])
yield substack
@@ -578,7 +579,7 @@ Source code for utils2p.main
"""
Returns a generator that yields patches of the stack of images.
This is useful when multiple stacks should be processed but they
- don't fit in memory, e.g. when computing an overall fluorescence
+ don't fit into memory, e.g. when computing an overall fluorescence
baseline for all trials of a fly.
Parameters
@@ -588,9 +589,9 @@ Source code for utils2p.main
patch_size : tuple of two integers
Size of the patch returned.
padding : integer or tuple of two integers
- The amount of overlap between patched. Note that this increases
- the effective patch size. Default is 0. For tuples different padding
- if used for the dimensions.
+ The amount of overlap between patches. Note that this increases
+ the effective patch size. Default is 0. If tuple, different padding
+ is used for the dimensions.
return_indices : boolean
If True, the indices necessary for slicing to generate the patch and
the indices necessary for slicing to remove the padding from the
@@ -641,7 +642,7 @@ Source code for utils2p.main
>>> np.all(stack1[:, indices[0][0] : indices[0][1], indices[1][0] : indices[1][1]] == first_patch_without_padding)
True
- Note that the patch has not padding at the edges.
+ Note that the patch has no padding at the edges.
When looking at the second patch we see that it is padded on both side
in the second dimension but still only on one side of the first dimension.
@@ -656,7 +657,7 @@ Source code for utils2p.main
>>> print(second_patch_without_padding.shape)
(5, 15, 20)
"""
- stack = load_img(path)
+ stack = load_img(path, memmap=True)
dims = stack.shape[1:]
n_patches_0 = math.ceil(dims[0] / patch_size[0])
n_patches_1 = math.ceil(dims[1] / patch_size[1])
@@ -665,14 +666,15 @@ Source code for utils2p.main
for i in range(n_patches_0):
for j in range(n_patches_1):
indices = [
- [patch_size[0] * i, patch_size[0] * (i + 1)],
- [patch_size[1] * j, patch_size[1] * (j + 1)],
- ]
+ [patch_size[0] * i, patch_size[0] * (i + 1)],
+ [patch_size[1] * j, patch_size[1] * (j + 1)],
+ ]
start_dim_0 = max(indices[0][0] - padding[0], 0)
start_dim_1 = max(indices[1][0] - padding[1], 0)
stop_dim_0 = min(indices[0][1] + padding[0], dims[0])
stop_dim_1 = min(indices[1][1] + padding[1], dims[1])
- patch = stack[:, start_dim_0:stop_dim_0, start_dim_1:stop_dim_1].copy()
+ patch = stack[:, start_dim_0:stop_dim_0,
+ start_dim_1:stop_dim_1].copy()
del stack
if not return_indices:
yield patch
@@ -680,8 +682,8 @@ Source code for utils2p.main
offset_dim_0 = indices[0][0] - start_dim_0
offset_dim_1 = indices[1][0] - start_dim_1
patch_indices = [
- [offset_dim_0, patch_size[0] + offset_dim_0],
- [offset_dim_1, patch_size[1] + offset_dim_1],
+ [offset_dim_0, patch_size[0] + offset_dim_0],
+ [offset_dim_1, patch_size[1] + offset_dim_1],
]
yield patch, indices, patch_indices
stack = load_img(path)
@@ -701,8 +703,9 @@ Source code for utils2p.main
Returns
-------
stacks : tuple of numpy arrays
- Number of numpy arrays depends on the number of channels recoded during the experiment.
- Has the following dimensions: TZYX or TYX for planar images.
+ Number of numpy arrays depends on the number of channels recoded during
+ the experiment. Has the following dimensions:
+ TZYX or TYX for planar images.
Examples
--------
@@ -729,32 +732,33 @@ Source code for utils2p.main
) # divide by two because the values are of type short (16bit = 2byte)
assert (
- not n_z % 1
- ), "Size given in metadata does not match the size of the raw file."
+ not n_z %
+ 1), "Size given in metadata does not match the size of the raw file."
n_z = int(n_z)
- # number of z slices from meta data can be different because of flyback frames
+ # number of z slices from meta data can be different
+ # because of flyback frames
meta_n_z = metadata.get_n_z()
if n_z == 1:
- stacks = np.zeros((n_channels, n_time_points, height, width), dtype="uint16")
+ stacks = np.zeros((n_channels, n_time_points, height, width),
+ dtype="uint16")
image_size = width * height
- t_size = (
- width * height * n_channels
- ) # number of values stored for a given time point (this includes images for all channels)
+ # number of values stored for a given time point
+ # (this includes images for all channels)
+ t_size = (width * height * n_channels)
with open(path, "rb") as f:
for t in range(n_time_points):
# print('{}/{}'.format(t,n_time_points))
a = array.array("H")
a.fromfile(f, t_size)
for c in range(n_channels):
- stacks[c, t, :, :] = np.array(
- a[c * image_size : (c + 1) * image_size]
- ).reshape((height, width))
+ stacks[c, t, :, :] = np.array(a[c * image_size:(c + 1) *
+ image_size]).reshape(
+ (height, width))
elif n_z > 1:
- stacks = np.zeros(
- (n_channels, n_time_points, meta_n_z, height, width), dtype="uint16"
- )
+ stacks = np.zeros((n_channels, n_time_points, meta_n_z, height, width),
+ dtype="uint16")
image_size = width * height
t_size = (
width * height * n_z * n_channels
@@ -765,22 +769,21 @@ Source code for utils2p.main
a = array.array("H")
a.fromfile(f, t_size)
a = np.array(a).reshape(
- (-1, image_size)
- ) # each row is an image alternating between channels
+ (-1, image_size
+ )) # each row is an image alternating between channels
for c in range(n_channels):
stacks[c, t, :, :, :] = a[c::n_channels, :].reshape(
- (n_z, height, width)
- )[:meta_n_z, :, :]
+ (n_z, height, width))[:meta_n_z, :, :]
area_mode = metadata.get_area_mode()
- if (area_mode == "line" or area_mode == "kymograph") and meta_n_z > 1:
+ if area_mode in ('line', 'kymograph') and meta_n_z > 1:
concatenated = []
for stack in stacks:
concatenated.append(concatenate_z(stack))
stacks = concatenated
if len(stacks) == 1:
- return (np.squeeze(stacks[0]),)
+ return (np.squeeze(stacks[0]), )
return tuple(np.squeeze(stacks))
@@ -799,7 +802,7 @@ Source code for utils2p.main
-------
stacks : tuple of numpy arrays
Z-stacks for Channel A (green) and Channel B (red).
-
+
Examples
--------
>>> import utils2p
@@ -849,9 +852,12 @@ Source code for utils2p.main
return np.squeeze(res)
-[docs]def save_img(
- path, img, imagej=True, color=False, full_dynamic_range=True, metadata=None
-):
+[docs]def save_img(path,
+ img,
+ imagej=True,
+ color=False,
+ full_dynamic_range=True,
+ metadata=None):
"""
Saves an image that is given as a numpy array to file.
@@ -871,12 +877,6 @@ Source code for utils2p.main
When an image is converted to uint8 for saving a color image the
max value of the output image is the max of uint8,
i.e. the image uses the full dynamic range available.
-
- Examples
- --------
- >>> import utils2p
- >>> import numpy as np
- >>>
"""
if img.dtype == np.bool:
img = img.astype(np.uint8) * 255
@@ -891,8 +891,7 @@ Source code for utils2p.main
old_max = np.finfo(img.dtype).max * np.ones(3)
else:
raise ValueError(
- f"img must be integer or float type not {img.dtype}"
- )
+ f"img must be integer or float type not {img.dtype}")
new_max = np.iinfo(np.uint8).max
img = img / old_max * new_max
img = img.astype(np.uint8)
@@ -915,7 +914,7 @@ Source code for utils2p.main
else:
# TODO add meta data like metadata={'xresolution':'4.25','yresolution':'0.0976','PixelAspectRatio':'43.57'}
# tifffile.imsave(path, img, imagej=imagej, metadata={})
- raise NotImplemented("Saving of metadata is not yet implemented")
+ raise NotImplementedError("Saving of metadata is not yet implemented")
def _find_file(directory, name, file_type, most_recent=True):
@@ -927,15 +926,12 @@ Source code for utils2p.main
----------
directory : str
Directory in which to search.
+ name : str
+ Name of the file.
most_recent : bool
If True, the file with the most recent change time
is returned and no exception is raised if multiple
files are present.
- name : str
- Name of the file.
- most_recent : bool
- If True, the most recently modified file is returned
- and no error is raised if multiple files were found.
Returns
-------
@@ -946,19 +942,20 @@ Source code for utils2p.main
if len(file_names) > 1:
if most_recent:
change_times = [os.stat(path).st_mtime for path in file_names]
- file_names = (file_names[np.argmax(change_times)],)
+ file_names = (file_names[np.argmax(change_times)], )
else:
raise RuntimeError(
- f"Could not identify {file_type} file unambiguously. Discovered {len(file_names)} {file_type} files in {directory}."
- )
+ f"Could not identify {file_type} file unambiguously. " +
+ f"Discovered {len(file_names)} {file_type} files in {directory}."
+ )
elif len(file_names) == 0:
- raise FileNotFoundError(f"No {file_type} file found in {directory}")
+ raise FileNotFoundError(f"No {file_type} file found in {directory}")
return str(file_names[0])
[docs]def find_metadata_file(directory, most_recent=False):
"""
- This functions find the path to the metadata file
+ This function finds the path to the metadata file
"Experiment.xml" created by ThorImage and returns it.
If multiple files with this name are found, it throws
an exception unless `most_recent` is `True`, in which case
@@ -984,16 +981,19 @@ Source code for utils2p.main
>>> utils2p.find_metadata_file("data/mouse_kidney_z_stack")
'data/mouse_kidney_z_stack/Experiment.xml'
"""
- return _find_file(directory, "Experiment.xml", "metadata", most_recent=most_recent)
+ return _find_file(directory,
+ "Experiment.xml",
+ "metadata",
+ most_recent=most_recent)
[docs]def find_seven_camera_metadata_file(directory, most_recent=False):
"""
- This functions find the path to the metadata file
+ This function finds the path to the metadata file
"capture_metadata.json" created by seven camera
setup and returns it.
If multiple files with this name are found, it throws
- an exception unless `most_recent` is `True`,in which case
+ an exception unless `most_recent` is `True`, in which case
the file with the most recent change time is returned.
Parameters
@@ -1016,12 +1016,15 @@ Source code for utils2p.main
>>> utils2p.find_seven_camera_metadata_file("data/mouse_kidney_raw")
'data/mouse_kidney_raw/behData/images/capture_metadata.json'
"""
- return _find_file(directory, "capture_metadata.json", "seven camera capture metadata", most_recent=most_recent)
+ return _find_file(directory,
+ "capture_metadata.json",
+ "seven camera capture metadata",
+ most_recent=most_recent)
[docs]def find_sync_file(directory, most_recent=False):
"""
- This functions find the path to the sync file
+ This function finds the path to the sync file
"Episode001.h5" created by ThorSync and returns it.
If multiple files with this name are found, it throws
an exception unless `most_recent` is `True`, in which case
@@ -1047,12 +1050,15 @@ Source code for utils2p.main
>>> utils2p.find_sync_file("data/mouse_kidney_z_stack")
'data/mouse_kidney_z_stack/Episode001.h5'
"""
- return _find_file(directory, "Episode001.h5", "synchronization", most_recent=most_recent)
+ return _find_file(directory,
+ "Episode001.h5",
+ "synchronization",
+ most_recent=most_recent)
[docs]def find_optical_flow_file(directory, most_recent=False):
"""
- This functions find the path to the optical flow file
+ This function finds the path to the optical flow file
"OptFlow.txt" created by seven camera software and returns it.
If multiple files with this name are found, it throws
an exception unless `most_recent` is `True`,in which case
@@ -1078,12 +1084,15 @@ Source code for utils2p.main
>>> utils2p.find_optical_flow_file("data/mouse_kidney_raw")
'data/mouse_kidney_raw/behData/OptFlowData/OptFlow.txt'
"""
- return _find_file(directory, "OptFlow.txt", "optical flow", most_recent=most_recent)
+ return _find_file(directory,
+ "OptFlow.txt",
+ "optical flow",
+ most_recent=most_recent)
[docs]def find_raw_file(directory, most_recent=False):
"""
- This functions find the path to the raw file
+ This function finds the path to the raw file
"Image_0001_0001.raw" created by ThorImage and returns it.
If multiple files with this name are found, it throws
an exception unless `most_recent` is `True`, in which case
@@ -1109,12 +1118,15 @@ Source code for utils2p.main
>>> utils2p.find_raw_file("data/mouse_kidney_raw")
'data/mouse_kidney_raw/2p/Untitled_001/Image_0001_0001.raw'
"""
- return _find_file(directory, "Image_0001_0001.raw", "raw", most_recent=most_recent)
+ return _find_file(directory,
+ "Image_0001_0001.raw",
+ "raw",
+ most_recent=most_recent)
[docs]def find_sync_metadata_file(directory, most_recent=False):
"""
- This function ifn the path to the synchonization
+ This function finds the path to the synchronization
metadata file "ThorRealTimeDataSettings.xml" created
by ThorSync. If multiple files with this name are found,
it throws an exception unless `most_recent` is `True`,
@@ -1142,12 +1154,15 @@ Source code for utils2p.main
'data/mouse_kidney_raw/2p/Sync-025/ThorRealTimeDataSettings.xml'
"""
- return _find_file(directory, "ThorRealTimeDataSettings.xml", "synchronization metadata", most_recent=most_recent)
+ return _find_file(directory,
+ "ThorRealTimeDataSettings.xml",
+ "synchronization metadata",
+ most_recent=most_recent)
[docs]def find_fictrac_file(directory, camera=3, most_recent=False):
"""
- This function ifn the path to the output file of
+ This function finds the path to the output file of
fictrac of the form `camera_{cam}*.dat`, where
`{cam}` is the values specified in the `camera`
argument. If multiple files with this name are found,
@@ -1181,19 +1196,25 @@ Source code for utils2p.main
>>> utils2p.find_fictrac_file("data", most_recent=True)
'data/camera_3-20210803_103010.dat'
"""
- return _find_file(directory, f"camera_{camera}*.dat", "fictrac output", most_recent=most_recent)
-
-
-[docs]def load_optical_flow(
- path: str, gain_0_x: float, gain_0_y: float, gain_1_x: float, gain_1_y: float, smoothing_kernel=None
-):
+ return _find_file(directory,
+ f"camera_{camera}*.dat",
+ "fictrac output",
+ most_recent=most_recent)
+
+
+[docs]def load_optical_flow(path: str,
+ gain_0_x: float,
+ gain_0_y: float,
+ gain_1_x: float,
+ gain_1_y: float,
+ smoothing_kernel=None):
"""
This function loads the optical flow data from
the file specified in path. By default it is
directly converted into ball rotation. Gain values
have to be determined with the calibration of the
optical flow sensors.
-
+
Parameters
----------
path : str
@@ -1257,7 +1278,7 @@ Source code for utils2p.main
<class 'dict'>
>>> optical_flow["sensor0"].keys()
dict_keys(['x', 'y', 'gain_x', 'gain_y'])
-
+
>>> optical_flow = utils2p.load_optical_flow(optical_flow_file, gain_0_x, gain_0_y, gain_1_x, gain_1_y, smoothing_kernel=np.ones(300) / 300)
>>> optical_flow["vel_pitch"].shape
(1408,)
@@ -1265,8 +1286,13 @@ Source code for utils2p.main
raw_data = np.genfromtxt(path, delimiter=",")
if smoothing_kernel is not None:
if len(smoothing_kernel) >= raw_data.shape[0]:
- raise ValueError(f"smoothing_kernel of shape {smoothing_kernel.shape} is longer than optical flow data of shape {raw_data.shape}.")
- raw_data = np.apply_along_axis(lambda m: np.convolve(m, smoothing_kernel, mode="same"), axis=0, arr=raw_data)
+ raise ValueError(
+ f"smoothing_kernel of shape {smoothing_kernel.shape} " +
+ f"is longer than optical flow data of shape {raw_data.shape}.")
+ raw_data = np.apply_along_axis(
+ lambda m: np.convolve(m, smoothing_kernel, mode="same"),
+ axis=0,
+ arr=raw_data)
data = {
"sensor0": {
"x": raw_data[:, 0],
@@ -1283,54 +1309,70 @@ Source code for utils2p.main
"time_stamps": raw_data[:, 4],
}
- data["vel_pitch"] = -(
- data["sensor0"]["y"] * data["sensor0"]["gain_y"]
- + data["sensor1"]["y"] * data["sensor1"]["gain_y"]
- ) * np.cos(np.deg2rad(45))
- data["vel_yaw"] = (
- data["sensor0"]["x"] * data["sensor0"]["gain_x"]
- + data["sensor1"]["x"] * data["sensor1"]["gain_x"]
- ) / 2.0
- data["vel_roll"] = (
- data["sensor0"]["y"] * data["sensor0"]["gain_y"]
- - data["sensor1"]["y"] * data["sensor1"]["gain_y"]
- ) * np.sin(np.deg2rad(45))
+ data["vel_pitch"] = -(data["sensor0"]["y"] * data["sensor0"]["gain_y"] +
+ data["sensor1"]["y"] *
+ data["sensor1"]["gain_y"]) * np.cos(np.deg2rad(45))
+ data["vel_yaw"] = (data["sensor0"]["x"] * data["sensor0"]["gain_x"] +
+ data["sensor1"]["x"] * data["sensor1"]["gain_x"]) / 2.0
+ data["vel_roll"] = (data["sensor0"]["y"] * data["sensor0"]["gain_y"] -
+ data["sensor1"]["y"] *
+ data["sensor1"]["gain_y"]) * np.sin(np.deg2rad(45))
return data
-[docs]def load_fictrac(path, ball_radius=10, fps=100, camera=3):
- col_names = ["Frame_counter",
- "delta_rot_cam_right", "delta_rot_cam_down", "delta_rot_cam_forward",
- "delta_rot_error",
- "delta_rot_lab_side", "delta_rot_lab_forward", "delta_rot_lab_turn",
- "abs_rot_cam_right", "abs_rot_cam_down", "abs_rot_cam_forward",
- "abs_rot_lab_side", "abs_rot_lab_forward", "abs_rot_lab_turn",
- "integrated_lab_x", "integrated_lab_y",
- "integrated_lab_heading",
- "animal_movement_direction_lab",
- "animal_movement_speed",
- "integrated_forward_movement", "integrated_side_movement",
- "timestamp",
- "seq_counter",
- "delta_time",
- "alt_time"
- ]
-
+[docs]def load_fictrac(path, ball_radius=5, fps=100):
+ """
+ This functions loads the fictrac data from file.
+
+ Parameters
+ ----------
+ path : str
+ Path to fictrac output file (.dat).
+ ball_radius : int
+ Radius of the spherical treadmill.
+ fps : float
+ Number of frames per second.
+
+ Returns
+ -------
+ data : dictionary
+ A dictionary with the following keys:
+ Speed, x, y, forward_pos, side_pos, delta_rot_lab_side,
+ delta_rot_lab_forward, delta_rot_lab_turn, integrated_forward_movement,
+ integrated_side_movement, Time
+ All speeds are in mm/s and all positions are in mm.
+ """
+ col_names = [
+ "Frame_counter", "delta_rot_cam_right", "delta_rot_cam_down",
+ "delta_rot_cam_forward", "delta_rot_error", "delta_rot_lab_side",
+ "delta_rot_lab_forward", "delta_rot_lab_turn", "abs_rot_cam_right",
+ "abs_rot_cam_down", "abs_rot_cam_forward", "abs_rot_lab_side",
+ "abs_rot_lab_forward", "abs_rot_lab_turn", "integrated_lab_x",
+ "integrated_lab_y", "integrated_lab_heading",
+ "animal_movement_direction_lab", "animal_movement_speed",
+ "integrated_forward_movement", "integrated_side_movement", "timestamp",
+ "seq_counter", "delta_time", "alt_time"
+ ]
+
dat_table = np.genfromtxt(path, delimiter=",")
data = {}
for i, col in enumerate(col_names):
- data[col] = dat_table[:, i]
+ data[col] = dat_table[:, i]
data["Speed"] = data["animal_movement_speed"] * ball_radius * fps
data["x"] = data["integrated_lab_x"] * ball_radius
data["y"] = data["integrated_lab_y"] * ball_radius
data["forward_pos"] = data["integrated_forward_movement"] * ball_radius
data["side_pos"] = data["integrated_side_movement"] * ball_radius
data["delta_rot_lab_side"] = data["delta_rot_lab_side"] * ball_radius * fps
- data["delta_rot_lab_forward"] = data["delta_rot_lab_forward"] * ball_radius * fps
- data["delta_rot_lab_turn"] = data["delta_rot_lab_turn"] / 2 / np.pi * 360 * fps
- data["integrated_forward_movement"] = data["integrated_forward_movement"] * ball_radius
- data["integrated_side_movement"] = data["integrated_side_movement"] * ball_radius
+ data["delta_rot_lab_forward"] = data[
+ "delta_rot_lab_forward"] * ball_radius * fps
+ data["delta_rot_lab_turn"] = data[
+ "delta_rot_lab_turn"] / 2 / np.pi * 360 * fps
+ data["integrated_forward_movement"] = data[
+ "integrated_forward_movement"] * ball_radius
+ data["integrated_side_movement"] = data[
+ "integrated_side_movement"] * ball_radius
data["Time"] = data["Frame_counter"] / fps
return data
@@ -1352,7 +1394,7 @@ utils2p
Navigation
-
+
- Core functions
- Synchronization functions
@@ -1372,7 +1414,7 @@ Related Topics
Quick search
@@ -1394,7 +1436,7 @@ Quick search
©2020, Florian Aymanns.
|
- Powered by Sphinx 2.4.4
+ Powered by Sphinx 4.1.2
& Alabaster 0.7.12
diff --git a/docs/build/html/_modules/utils2p/synchronization.html b/docs/build/html/_modules/utils2p/synchronization.html
index 3362145..ba36a5d 100644
--- a/docs/build/html/_modules/utils2p/synchronization.html
+++ b/docs/build/html/_modules/utils2p/synchronization.html
@@ -1,17 +1,18 @@
-
+
+
utils2p.synchronization — utils2p documentation
-
-
-
+
+
+
+
-
@@ -38,21 +39,28 @@ Source code for utils2p.synchronization
This module provides functions to process the synchronization data
acquired with Thor Sync during imaging.
"""
+import warnings
+import json
import numpy as np
import h5py
-import json
import scipy.signal
import utils2p.main as main
+
class SynchronizationError(Exception):
"""The input data is not consistent with synchronization assumption."""
- pass
+
+def get_lines_from_h5_file(file_path, line_names):
+ warnings.warn(
+ "get_lines_from_h5_file is deprecated use get_lines_from_sync_file instead",
+ DeprecationWarning)
+ return get_lines_from_sync_file(file_path, line_names)
-[docs]def get_lines_from_h5_file(file_path, line_names):
+[docs]def get_lines_from_sync_file(file_path, line_names):
"""
This function returns the values of the requested lines save in
an h5 generated by ThorSync.
@@ -86,22 +94,35 @@ Source code for utils2p.synchronization
(54000,)
"""
lines = []
-
+
with h5py.File(file_path, "r") as f:
for name in line_names:
- try:
+ lines_with_this_name = []
+ for line_type in ("DI", "CI", "AI"):
try:
- try:
- lines.append(f["DI"][name][:].squeeze())
- except KeyError:
- lines.append(f["CI"][name][:].squeeze())
+ lines_with_this_name.append(
+ f[line_type][name][:].squeeze())
except KeyError:
- lines.append(f["AI"][name][:].squeeze())
- except KeyError:
+ pass
+ if len(lines_with_this_name) == 1:
+ lines.append(lines_with_this_name[0])
+ elif len(lines_with_this_name) == 0:
DI_keys = list(f["DI"].keys())
CI_keys = list(f["CI"].keys())
AI_keys = list(f["AI"].keys())
- raise KeyError(f"No line named '{name}' exists. The digital lines are {DI_keys}, the continuous lines are {CI_keys}, and the analogue inputs are {AI_keys}.")
+ raise KeyError(
+ f"No line named '{name}' exists. The digital lines are " +
+ f"{DI_keys}, the continuous lines are {CI_keys}, " +
+ f"and the analogue inputs are {AI_keys}.")
+ else:
+ DI_keys = list(f["DI"].keys())
+ CI_keys = list(f["CI"].keys())
+ AI_keys = list(f["AI"].keys())
+ raise KeyError(
+ f"Multiple lines named '{name}' exist. " +
+ f"The digital lines are {DI_keys}, the continuous lines " +
+ + f"are {CI_keys}, and the analogue inputs are {AI_keys}."
+ )
return tuple(lines)
@@ -147,11 +168,11 @@ Source code for utils2p.synchronization
size : float or tuple
Size of the rising edge. If float it is used as minimum.
Tuples specify a range. To get falling edges use negative values.
- Only one boundary can be applied using np.inf as on of the values.
- All boundaries are exclusive the specified value.
+ Only one boundary can be applied using np.inf as one of the values.
+ All boundaries are excluding the specified value.
correct_possible_split_edges : boolean
The rise or fall of an edge can in some cases be spread over
- several ticks. If True theses "blurry" edges are sharpened
+ several ticks. If `True` these "blurry" edges are sharpened
with :func:`utils2p.synchronization.correct_split_edges`.
Default is True.
@@ -182,14 +203,15 @@ Source code for utils2p.synchronization
if correct_possible_split_edges:
line = correct_split_edges(line)
diff = np.diff(line.astype(np.float64))
- if type(size) == tuple:
+ if isinstance(size, tuple):
zero_elements = np.isclose(diff, np.zeros_like(diff))
edges_in_range = np.logical_and(diff > size[0], diff < size[1])
- valid_edges = np.logical_and(edges_in_range, np.logical_not(zero_elements))
+ valid_edges = np.logical_and(edges_in_range,
+ np.logical_not(zero_elements))
indices = np.where(valid_edges)
else:
indices = np.where(diff > size)
- indices = tuple([i + 1 for i in indices])
+ indices = tuple(i + 1 for i in indices)
return indices
@@ -217,17 +239,17 @@ Source code for utils2p.synchronization
"""
rising_edges = np.where(np.diff(line) > 0)[0] + 1
falling_edges = np.where(np.diff(line) < 0)[0]
-
+
split_rising_edges = np.where(np.diff(rising_edges) == 1)[0]
split_falling_edges = np.where(np.diff(falling_edges) == 1)[0]
-
+
if len(split_rising_edges) == 0 and len(split_falling_edges) == 0:
return line
-
+
first_halfs_rising = rising_edges[split_rising_edges]
second_halfs_rising = rising_edges[split_rising_edges + 1]
line[first_halfs_rising] = line[second_halfs_rising]
-
+
first_halfs_falling = falling_edges[split_falling_edges]
second_halfs_falling = falling_edges[split_falling_edges + 1]
line[second_halfs_falling] = line[first_halfs_falling]
@@ -277,10 +299,10 @@ Source code for utils2p.synchronization
indices = edges(line, size=(0, np.inf))
if zero_based_counter and line[0] >= 0:
if line[0] > 0:
- warnings.warn(f"The counters start with value {line[0]}")
+ warnings.warn(f"The counter starts with value {line[0]}")
indices_with_first_frame = np.zeros(len(indices[0]) + 1, dtype=int)
indices_with_first_frame[1:] = indices[0]
- indices = (indices_with_first_frame,)
+ indices = (indices_with_first_frame, )
time_points = times[indices]
return time_points
@@ -321,15 +343,15 @@ Source code for utils2p.synchronization
return capture_info
-[docs]def process_cam_line(line, capture_json):
+[docs]def process_cam_line(line, seven_camera_metadata):
"""
- Remove superfluous signals and use frame numbers in array.
+ Removes superfluous signals and uses frame numbers in array.
The cam line signal form the h5 file is a binary sequence.
Rising edges mark the acquisition of a new frame.
The setup keeps producing rising edges after the acquisition of the
last frame. These rising edges are ignored.
- This function converts it to frame numbers using the information
- stored in the metadata file of the seven camera setup.
+ This function converts the binary line to frame numbers using the
+ information stored in the metadata file of the seven camera setup.
In the metadata file the keys are the indices of the file names
and the values are the grabbed frame numbers. Suppose the 3rd
frame was dropped. Then the entries in the dictionary will
@@ -342,8 +364,8 @@ Source code for utils2p.synchronization
----------
line : numpy array
Line signal from h5 file.
- capture_json : string
- Path to the json file save by our camera software.
+ seven_camera_metadata : string
+ Path to the json file saved by our camera software.
This file is usually located in the same folder as the frames
and is called 'capture_metadata.json'. If None, it is assumed
that no frames were dropped.
@@ -352,7 +374,8 @@ Source code for utils2p.synchronization
-------
processed_line : numpy array
Array with frame number for each time point.
- If no frame is available for a given time the value is -9223372036854775808.
+ If no frame is available for a given time,
+ the value is -9223372036854775808.
Examples
--------
@@ -360,16 +383,16 @@ Source code for utils2p.synchronization
>>> import utils2p.synchronization
>>> import numpy as np
>>> h5_file = utils2p.find_sync_file("data/mouse_kidney_raw")
- >>> capture_json = utils2p.find_seven_camera_metadata_file("data/mouse_kidney_raw")
+ >>> seven_camera_metadata = utils2p.find_seven_camera_metadata_file("data/mouse_kidney_raw")
>>> line_names = ["Basler"]
>>> (cam_line,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)
>>> set(np.diff(cam_line))
{0, 8, 4294967288}
- >>> processed_cam_line = utils2p.synchronization.process_cam_line(cam_line, capture_json)
+ >>> processed_cam_line = utils2p.synchronization.process_cam_line(cam_line, seven_camera_metadata)
>>> set(np.diff(processed_cam_line))
{0, 1, -9223372036854775808, 9223372036854775749}
>>> cam_line = np.array([0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0])
- >>> utils2p.synchronization.process_cam_line(cam_line, capture_json=None)
+ >>> utils2p.synchronization.process_cam_line(cam_line, seven_camera_metadata=None)
array([-9223372036854775808, 0, 0,
0, 0, 0,
1, 1, 1,
@@ -383,11 +406,13 @@ Source code for utils2p.synchronization
rising_edges = edges(line, (0, np.inf))[0]
# Load capture metadata or generate default
- if capture_json is not None:
- with open(capture_json, "r") as f:
+ if seven_camera_metadata is not None:
+ with open(seven_camera_metadata, "r") as f:
capture_info = json.load(f)
else:
- capture_info = _capture_metadata([len(rising_edges),])
+ capture_info = _capture_metadata([
+ len(rising_edges),
+ ])
# Find the number of frames for each camera
n_frames = []
@@ -397,14 +422,17 @@ Source code for utils2p.synchronization
# Ensure all cameras acquired the same number of frames
if len(np.unique(n_frames)) > 1:
- raise SynchronizationError("The frames across cameras are not synchronized.")
+ raise SynchronizationError(
+ "The frames across cameras are not synchronized.")
# Last rising edge that corresponds to a frame
last_tick = max(n_frames)
# check that there is a rising edge for every frame
if len(rising_edges) < last_tick:
- raise ValueError("The provided cam line and metadata are inconsistent. cam line has less frame acquisitions than metadata.")
+ raise ValueError(
+ "The provided cam line and metadata are inconsistent. " +
+ "cam line has less frame acquisitions than metadata.")
# Ensure correct handling if no rising edges are present after last frame
if len(rising_edges) == int(last_tick):
@@ -422,9 +450,9 @@ Source code for utils2p.synchronization
current_frame = 0
first_camera_used = sorted(list(capture_info["Frame Counts"].keys()))[0]
for i, (start, stop) in enumerate(
- zip(rising_edges[: last_tick], rising_edges[1 : last_tick + 1])
- ):
- if capture_info["Frame Counts"][first_camera_used][str(current_frame + 1)] <= i:
+ zip(rising_edges[:last_tick], rising_edges[1:last_tick + 1])):
+ if capture_info["Frame Counts"][first_camera_used][str(current_frame +
+ 1)] <= i:
current_frame += 1
processed_line[start:stop] = current_frame
return processed_line.astype(np.int)
@@ -445,7 +473,7 @@ Source code for utils2p.synchronization
given steps_per_frame has to be set.
steps_per_frame : int
Number of steps the frame counter takes per frame.
- This includes fly back frame and averaging, i.e. if you
+ This includes fly back frames and averaging, i.e. if you
acquire one frame and flyback frames is set to 3 this number
should be 4.
@@ -453,7 +481,8 @@ Source code for utils2p.synchronization
-------
processed_frame_counter : numpy array
Array with frame number for each time point.
- If no frame was recorded at a time point the value is -9223372036854775808.
+ If no frame was recorded at a time point,
+ the value is -9223372036854775808.
Examples
--------
@@ -475,10 +504,11 @@ Source code for utils2p.synchronization
>>> processed_frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, steps_per_frame=steps_per_frame)
>>> set(processed_frame_counter)
{0, -9223372036854775808}
-
+
By default the function treat volumes as frames.
If you want to treat every slice of the volume as a separate frame,
you can do so by `steps_per_frame`. The example has three steps in z.
+
>>> steps_per_frame = metadata.get_n_averaging()
>>> steps_per_frame
10
@@ -487,41 +517,46 @@ Source code for utils2p.synchronization
{0, 1, 2, -9223372036854775808}
"""
if metadata is not None and steps_per_frame is not None:
- warnings.warn("metadata argument will be ignored because steps_per_frame argument was set.")
- if metadata is not None and type(metadata) != main.Metadata:
- raise TypeError("metadata argument must be of type utils2p.Metadata or None.")
- if steps_per_frame is not None and type(steps_per_frame) != int:
+ warnings.warn("metadata argument will be ignored " +
+ "because steps_per_frame argument was set.")
+ if metadata is not None and not isinstance(metadata, main.Metadata):
+ raise TypeError(
+ "metadata argument must be of type utils2p.Metadata or None.")
+ if steps_per_frame is not None and not isinstance(steps_per_frame, int):
raise TypeError("steps_per_frame has to be of type int")
if metadata is not None and steps_per_frame is None:
if metadata.get_value("Streaming", "zFastEnable") == "0":
steps_per_frame = 1
else:
- steps_per_frame = metadata.get_n_z()
+ steps_per_frame = metadata.get_n_z()
if metadata.get_value("Streaming", "enable") == "1":
steps_per_frame += metadata.get_n_flyback_frames()
- if metadata.get_value("LSM", "averageMode") == "1" and metadata.get_area_mode() not in ["line", "kymograph"]:
+ if metadata.get_value(
+ "LSM",
+ "averageMode") == "1" and metadata.get_area_mode() not in [
+ "line", "kymograph"
+ ]:
steps_per_frame = steps_per_frame * metadata.get_n_averaging()
elif steps_per_frame is None:
- raise ValueError("If no metadata object is given, the steps_per_frame argument has to be set.")
+ raise ValueError("If no metadata object is given, " +
+ "the steps_per_frame argument has to be set.")
processed_frame_counter = np.ones_like(line) * np.nan
rising_edges = edges(line, (0, np.inf))[0]
-
- #Case of one frame/volume only
+
+ # Case of one frame/volume only
if len(rising_edges) <= steps_per_frame:
- processed_frame_counter[rising_edges[0] : ] = 0
+ processed_frame_counter[rising_edges[0]:] = 0
return processed_frame_counter.astype(np.int)
-
+
for i, index in enumerate(
- range(0, len(rising_edges) - steps_per_frame, steps_per_frame)
- ):
+ range(0,
+ len(rising_edges) - steps_per_frame, steps_per_frame)):
processed_frame_counter[
- rising_edges[index] : rising_edges[index + steps_per_frame]
- ] = i
- processed_frame_counter[rising_edges[-steps_per_frame] :] = (
- processed_frame_counter[rising_edges[-steps_per_frame] - 1] + 1
- )
+ rising_edges[index]:rising_edges[index + steps_per_frame]] = i
+ processed_frame_counter[rising_edges[-1 * steps_per_frame]:] = (
+ processed_frame_counter[rising_edges[-1 * steps_per_frame] - 1] + 1)
return processed_frame_counter.astype(np.int)
@@ -569,7 +604,7 @@ Source code for utils2p.synchronization
time point. If the value is -9223372036854775808, no optical flow
value was recorded for this time point.
- Note: Due to the time it take to transfer the data
+ Note: Due to the time it takes to transfer the data
from the Arduino to the computer it is possible that
the last optical flow data point is missing, i.e.
the processed optical flow line indicates one more
@@ -606,12 +641,9 @@ Source code for utils2p.synchronization
processed_optical_flow_line = np.ones_like(line) * np.nan
rising_edges = edges(line, (0, np.inf))[0]
for i in range(0, len(rising_edges) - 1):
- processed_optical_flow_line[
- rising_edges[i] : rising_edges[i + 1]
- ] = i
- processed_optical_flow_line[rising_edges[-1] :] = (
- processed_optical_flow_line[rising_edges[-1] - 1] + 1
- )
+ processed_optical_flow_line[rising_edges[i]:rising_edges[i + 1]] = i
+ processed_optical_flow_line[rising_edges[-1]:] = (
+ processed_optical_flow_line[rising_edges[-1] - 1] + 1)
return processed_optical_flow_line.astype(np.int)
@@ -662,7 +694,7 @@ Source code for utils2p.synchronization
last_idx = indices[-1]
cropped_lines = []
for line in lines:
- cropped_lines.append(line[first_idx : last_idx + 1])
+ cropped_lines.append(line[first_idx:last_idx + 1])
return tuple(cropped_lines)
@@ -694,8 +726,8 @@ Source code for utils2p.synchronization
>>> line_names = ["Frame Counter", "Basler"]
>>> (frame_counter, cam_line,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)
>>> frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, steps_per_frame=4)
- >>> capture_json = utils2p.find_seven_camera_metadata_file("data/mouse_kidney_raw")
- >>> cam_line = utils2p.synchronization.process_cam_line(cam_line, capture_json)
+ >>> seven_camera_metadata = utils2p.find_seven_camera_metadata_file("data/mouse_kidney_raw")
+ >>> cam_line = utils2p.synchronization.process_cam_line(cam_line, seven_camera_metadata)
>>> utils2p.synchronization.beh_idx_to_2p_idx(np.array([0,]), cam_line, frame_counter)
array([-9223372036854775808])
>>> utils2p.synchronization.beh_idx_to_2p_idx(np.array([10,]), cam_line, frame_counter)
@@ -720,7 +752,6 @@ Source code for utils2p.synchronization
for i, frame_num in enumerate(beh_indices):
thor_sync_index = thor_sync_indices[frame_num]
- beh_frame_num = cam_line[thor_sync_index]
indices_2p[i] = frame_counter[thor_sync_index]
return indices_2p.astype(np.int)
@@ -728,7 +759,7 @@ Source code for utils2p.synchronization
def reduce_during_2p_frame(frame_counter, values, function):
"""
- Reduces all values occuring during the acquisition of a
+ Reduces all values occurring during the acquisition of a
2-photon frame to a single value using the `function`
given by the user.
@@ -772,13 +803,15 @@ Source code for utils2p.synchronization
>>> set(stimulus_during_2p_frames)
{0.0, 1.0}
"""
- import warnings
- warnings.warn("reduce_during_2p_frame is deprecated use reduce_during_frame instead", DeprecationWarning)
+ warnings.warn(
+ "reduce_during_2p_frame is deprecated use reduce_during_frame instead",
+ DeprecationWarning)
return reduce_during_frame(frame_counter, values, function)
+
[docs]def reduce_during_frame(line, values, function):
"""
- Reduces all values occuring during the acquisition of a
+ Reduces all values occurring during the acquisition of a
frame to a single value using the `function` given by the user.
The line function should be of the resolution of
the ThorSync ticks and have the frame index as values.
@@ -827,16 +860,16 @@ Source code for utils2p.synchronization
"""
if len(line) != len(values):
raise ValueError("line and values need to have the same length.")
-
+
thor_sync_indices = tuple(edges(line, (0, np.inf))[0])
-
+
starts = thor_sync_indices
- stops = thor_sync_indices[1:] + (len(line),)
-
+ stops = thor_sync_indices[1:] + (len(line), )
+
if not line[0] == -9223372036854775808:
- starts = (0,) + starts
- stops = (thor_sync_indices[0],) + stops
-
+ starts = (0, ) + starts
+ stops = (thor_sync_indices[0], ) + stops
+
dtype = values.dtype
if np.issubdtype(dtype, np.number):
dtype = np.float
@@ -850,7 +883,7 @@ Source code for utils2p.synchronization
return reduced
-class SyncMetadata(main._XMLFile):
+[docs]class SyncMetadata(main._XMLFile):
"""
Class for managing ThorSync metadata.
Loads metadata file 'ThorRealTimeDataSettings.xml'
@@ -873,7 +906,6 @@ Source code for utils2p.synchronization
>>> type(metadata)
<class 'utils2p.synchronization.SyncMetadata'>
"""
-
def get_active_devices(self):
active_devices = []
for device in self.get_value("DaqDevices", "AcquireBoard"):
@@ -881,7 +913,6 @@ Source code for utils2p.synchronization
active_devices.append(device)
return active_devices
-
def get_freq(self):
"""
Returns the frequency of the ThorSync
@@ -905,15 +936,20 @@ Source code for utils2p.synchronization
for element in device.findall("SampleRate"):
if element.attrib["enable"] == "1":
if set_for_device:
- raise ValueError("Invalid metadata file. Multiple sample rates are enabled for device {device.type}")
+ raise ValueError(
+ "Invalid metadata file. Multiple sample rates " +
+ f"are enabled for device {device.type}")
if sample_rate != -1:
raise ValueError("Multiple devices are enabled.")
sample_rate = int(element.attrib["rate"])
set_for_device = True
- return sample_rate
+ return sample_rate
-def processed_lines(sync_file, sync_metadata_file, metadata_2p_file, seven_camera_metadata_file=None):
+[docs]def get_processed_lines(sync_file,
+ sync_metadata_file,
+ metadata_2p_file,
+ seven_camera_metadata_file=None):
"""
This function extracts all the standard lines and processes them.
It works for both microscopes.
@@ -933,7 +969,7 @@ Source code for utils2p.synchronization
-------
processed_lines : dictionary
Dictionary with all processed lines.
-
+
Examples
--------
>>> import utils2p
@@ -946,42 +982,59 @@ Source code for utils2p.synchronization
>>> processed_lines = utils2p.synchronization.get_processed_lines(sync_file, sync_metadata_file, metadata_file, seven_camera_metadata_file)
"""
processed_lines = {}
- processed_lines["Capture On"], processed_lines["Frame Counter"] = get_lines_from_h5_file(sync_file, ["Capture On", "Frame Counter"])
+ processed_lines["Capture On"], processed_lines[
+ "Frame Counter"] = get_lines_from_sync_file(
+ sync_file, ["Capture On", "Frame Counter"])
try:
# For microscope 1
- processed_lines["CO2"], processed_lines["Cameras"], processed_lines["Optical flow"] = get_lines_from_h5_file(sync_file, ["CO2_Stim", "Basler", "OpFlow",])
+ processed_lines["CO2"], processed_lines["Cameras"], processed_lines[
+ "Optical flow"] = get_lines_from_sync_file(sync_file, [
+ "CO2_Stim",
+ "Basler",
+ "OpFlow",
+ ])
except KeyError:
# For microscope 2
- processed_lines["CO2"], processed_lines["Cameras"] = get_lines_from_h5_file(sync_file, ["CO2", "Cameras",])
-
+ processed_lines["CO2"], processed_lines[
+ "Cameras"] = get_lines_from_sync_file(sync_file, [
+ "CO2",
+ "Cameras",
+ ])
- processed_lines["Cameras"] = process_cam_line(processed_lines["Cameras"], seven_camera_metadata_file)
+ processed_lines["Cameras"] = process_cam_line(processed_lines["Cameras"],
+ seven_camera_metadata_file)
metadata_2p = main.Metadata(metadata_2p_file)
- processed_lines["Frame Counter"] = process_frame_counter(processed_lines["Frame Counter"], metadata_2p)
+ processed_lines["Frame Counter"] = process_frame_counter(
+ processed_lines["Frame Counter"], metadata_2p)
processed_lines["CO2"] = process_stimulus_line(processed_lines["CO2"])
-
+
if "Optical flow" in processed_lines.keys():
- processed_lines["Optical flow"] = process_optical_flow_line(processed_lines["Optical flow"])
+ processed_lines["Optical flow"] = process_optical_flow_line(
+ processed_lines["Optical flow"])
- mask = np.logical_and(processed_lines["Capture On"], processed_lines["Frame Counter"] >= 0)
+ mask = np.logical_and(processed_lines["Capture On"],
+ processed_lines["Frame Counter"] >= 0)
- # Make sure the clipping start just before the acquisition of the first frame
+ # Make sure the clipping starts just before the
+ # acquisition of the first frame
indices = np.where(mask)[0]
mask[max(0, indices[0] - 1)] = True
- for line_name, line in processed_lines.items():
- processed_lines[line_name] = crop_lines(mask, [processed_lines[line_name],])[0]
-
+ for line_name, _ in processed_lines.items():
+ processed_lines[line_name] = crop_lines(mask, [
+ processed_lines[line_name],
+ ])[0]
+
# Get times of ThorSync ticks
metadata = SyncMetadata(sync_metadata_file)
freq = metadata.get_freq()
times = get_times(len(processed_lines["Frame Counter"]), freq)
processed_lines["Times"] = times
- return processed_lines
+ return processed_lines
[docs]def epoch_length_filter(line, cut_off):
@@ -1006,25 +1059,47 @@ Source code for utils2p.synchronization
rising_edges = np.where(diff > 0)[0]
falling_edges = np.where(diff < 0)[0]
epoch_length = falling_edges - rising_edges
-
+
discarded_epochs = (epoch_length < cut_off)
-
+
discarded_rising_edges = rising_edges[discarded_epochs]
discarded_falling_edges = falling_edges[discarded_epochs]
filtered = line.copy()
for start, stop in zip(discarded_rising_edges, discarded_falling_edges):
filtered[start:stop] = 0
-
+
return filtered.astype(bool)
-[docs]def process_odor_line(line, freq=30000, arduino_commands=("None", "Odor1", "Odor2", "Odor3", "Odor4", "Odor5", "Odor6"), step_size=0.65, filter_only=False):
+[docs]def process_odor_line(line,
+ freq=30000,
+ arduino_commands=(
+ "None",
+ "Odor1",
+ "Odor2",
+ "Odor3",
+ "Odor4",
+ "Odor5",
+ "Odor6",
+ "Odor1R",
+ "Odor2R",
+ "Odor1L",
+ "Odor2L",
+ "Odor1B",
+ "Odor2B",
+ "WaterB",
+ "bubbleMFC_R0",
+ "MFC1_R2",
+ "MFC2_L1",
+ ),
+ step_size=0.2703,
+ filter_only=False):
"""
- The odor line is based on a PWM signal for the Arduino controlling the valves.
- This function applies a Butterworth filter and converts the resulting voltages
- to level indices. The corresponding the setting of the valves are given by
- the `arduino_commands` argument.
+ The odor line is based on a PWM signal for the Arduino controlling the
+ valves. This function applies a Butterworth filter and converts the
+ resulting voltages to level indices. The corresponding valve settings
+ are given by the `arduino_commands` argument.
Parameters
----------
@@ -1034,15 +1109,16 @@ Source code for utils2p.synchronization
Frequency of ThorSync. Necessary for the Butterworth filter.
arduino_commands : list of strings
Description of the valve settings for commands sent to arduino.
- Note: The order matters since the serial communications between computer
- and Arduino is based on the index in the list. This index is converted
- to a PWM signal that is recorded by ThorSync.
+ Note: The order matters since the serial communications between
+ computer and Arduino is based on the index in the list.
+ This index is converted to a PWM signal that is recorded by ThorSync.
step_size : float
The voltage step size between different levels of the PWM. This is used
to convert the voltage to indices.
filter_only : bool
- If `True`, only the filtered line is returned instead of the odors based
- on the `arduino_commands`. This is useful for determining the `step_size`.
+ If `True`, only the filtered line is returned instead of the odors
+ based on the `arduino_commands`. This is useful for determining
+ the `step_size`.
Returns
-------
@@ -1058,7 +1134,7 @@ Source code for utils2p.synchronization
filtered_mask = epoch_length_filter(mask, freq)
indices[mask & ~filtered_mask] = 0
return np.array(arduino_commands)[indices]
-
+
[docs]def event_based_frame_indices(event_indicator):
"""
@@ -1088,36 +1164,46 @@ Source code for utils2p.synchronization
inv_mask = inv_mask[::-1]
mask = mask.astype(np.int8)
inv_mask = inv_mask.astype(np.int8)
- mask = np.concatenate(([0,], mask))
- inv_mask = np.concatenate((inv_mask, [0,]))
-
+ mask = np.concatenate(([
+ 0,
+ ], mask))
+ inv_mask = np.concatenate((inv_mask, [
+ 0,
+ ]))
+
event_numbers = np.cumsum(np.clip(np.diff(mask), 0, None))
inv_event_numbers = np.cumsum(np.clip(np.diff(inv_mask), 0, None))
mask = mask[1:]
inv_mask = inv_mask[:-1]
-
+
# Count up from zero during the event
event_frame_indices = np.cumsum(mask)
inv_event_frame_indices = np.cumsum(inv_mask)
n_events = max(event_numbers)
for event in np.arange(1, n_events + 1):
i = np.where(event_numbers == event)
- event_frame_indices[i] = event_frame_indices[i] - event_frame_indices[i[0][0]]
+ event_frame_indices[i] = event_frame_indices[i] - \
+ event_frame_indices[i[0][0]]
event_frame_indices[~mask.astype(np.bool)] = 0
-
+
# Count down from zero before each event
n_inv_event = max(inv_event_numbers)
for inv_event in np.arange(1, n_inv_event + 1):
i = np.where(inv_event_numbers == inv_event)
- inv_event_frame_indices[i] = inv_event_frame_indices[i] - inv_event_frame_indices[i[0][0]]
+ inv_event_frame_indices[i] = inv_event_frame_indices[i] - \
+ inv_event_frame_indices[i[0][0]]
inv_event_frame_indices[~inv_mask.astype(np.bool)] = 0
inv_event_frame_indices = -inv_event_frame_indices[::-1]
-
- event_frame_indices[~mask.astype(bool)] = inv_event_frame_indices[~mask.astype(bool)]
-
- event_numbers = np.cumsum(-1 * np.clip(np.diff(np.concatenate(([2], event_frame_indices))), -1, 0))
- # Make sure the last frames are not counted as the pre-event frames of a new event
+
+ event_frame_indices[~mask.astype(bool)] = inv_event_frame_indices[
+ ~mask.astype(bool)]
+
+ event_numbers = np.cumsum(
+ -1 * np.clip(np.diff(np.concatenate(
+ ([2], event_frame_indices))), -1, 0))
+ # Make sure the last frames are not counted as the pre-event
+ # frames of a new event
n_events = max(event_numbers)
last_event = np.where(event_numbers == n_events)
if np.all(event_frame_indices[last_event] < 0):
@@ -1142,7 +1228,7 @@ utils2p
Navigation
-
+
- Core functions
- Synchronization functions
@@ -1162,7 +1248,7 @@ Related Topics
Quick search
@@ -1184,7 +1270,7 @@ Quick search
©2020, Florian Aymanns.
|
- Powered by Sphinx 2.4.4
+ Powered by Sphinx 4.1.2
& Alabaster 0.7.12
diff --git a/docs/build/html/_sources/loading_preprocessing.rst.txt b/docs/build/html/_sources/loading_preprocessing.rst.txt
index c8d405d..c0caa3d 100644
--- a/docs/build/html/_sources/loading_preprocessing.rst.txt
+++ b/docs/build/html/_sources/loading_preprocessing.rst.txt
@@ -1,9 +1,10 @@
Loading and preprocessing
=========================
-.. autofunction:: utils2p.synchronization.get_lines_from_h5_file
+.. autofunction:: utils2p.synchronization.get_lines_from_sync_file
.. autofunction:: utils2p.synchronization.process_cam_line
.. autofunction:: utils2p.synchronization.process_frame_counter
.. autofunction:: utils2p.synchronization.process_optical_flow_line
.. autofunction:: utils2p.synchronization.process_stimulus_line
.. autofunction:: utils2p.synchronization.process_odor_line
+.. autofunction:: utils2p.synchronization.get_processed_lines
diff --git a/docs/build/html/_sources/misc_sync.rst.txt b/docs/build/html/_sources/misc_sync.rst.txt
index be6b312..f1bb3e9 100644
--- a/docs/build/html/_sources/misc_sync.rst.txt
+++ b/docs/build/html/_sources/misc_sync.rst.txt
@@ -8,3 +8,4 @@ Miscellaneous
.. autofunction:: utils2p.synchronization.correct_split_edges
.. autofunction:: utils2p.synchronization.epoch_length_filter
.. autofunction:: utils2p.synchronization.event_based_frame_indices
+.. autoclass:: utils2p.synchronization.SyncMetadata
diff --git a/docs/build/html/_static/basic.css b/docs/build/html/_static/basic.css
index 0119285..912859b 100644
--- a/docs/build/html/_static/basic.css
+++ b/docs/build/html/_static/basic.css
@@ -4,7 +4,7 @@
*
* Sphinx stylesheet -- basic theme.
*
- * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -15,6 +15,12 @@ div.clearer {
clear: both;
}
+div.section::after {
+ display: block;
+ content: '';
+ clear: left;
+}
+
/* -- relbar ---------------------------------------------------------------- */
div.related {
@@ -124,7 +130,7 @@ ul.search li a {
font-weight: bold;
}
-ul.search li div.context {
+ul.search li p.context {
color: #888;
margin: 2px 0 0 30px;
text-align: left;
@@ -271,25 +277,25 @@ p.rubric {
font-weight: bold;
}
-img.align-left, .figure.align-left, object.align-left {
+img.align-left, figure.align-left, .figure.align-left, object.align-left {
clear: left;
float: left;
margin-right: 1em;
}
-img.align-right, .figure.align-right, object.align-right {
+img.align-right, figure.align-right, .figure.align-right, object.align-right {
clear: right;
float: right;
margin-left: 1em;
}
-img.align-center, .figure.align-center, object.align-center {
+img.align-center, figure.align-center, .figure.align-center, object.align-center {
display: block;
margin-left: auto;
margin-right: auto;
}
-img.align-default, .figure.align-default {
+img.align-default, figure.align-default, .figure.align-default {
display: block;
margin-left: auto;
margin-right: auto;
@@ -313,24 +319,31 @@ img.align-default, .figure.align-default {
/* -- sidebars -------------------------------------------------------------- */
-div.sidebar {
+div.sidebar,
+aside.sidebar {
margin: 0 0 0.5em 1em;
border: 1px solid #ddb;
- padding: 7px 7px 0 7px;
+ padding: 7px;
background-color: #ffe;
width: 40%;
float: right;
+ clear: right;
+ overflow-x: auto;
}
p.sidebar-title {
font-weight: bold;
}
+div.admonition, div.topic, blockquote {
+ clear: left;
+}
+
/* -- topics ---------------------------------------------------------------- */
div.topic {
border: 1px solid #ccc;
- padding: 7px 7px 0 7px;
+ padding: 7px;
margin: 10px 0 10px 0;
}
@@ -352,10 +365,6 @@ div.admonition dt {
font-weight: bold;
}
-div.admonition dl {
- margin-bottom: 0;
-}
-
p.admonition-title {
margin: 0px 10px 5px 0px;
font-weight: bold;
@@ -366,9 +375,30 @@ div.body p.centered {
margin-top: 25px;
}
+/* -- content of sidebars/topics/admonitions -------------------------------- */
+
+div.sidebar > :last-child,
+aside.sidebar > :last-child,
+div.topic > :last-child,
+div.admonition > :last-child {
+ margin-bottom: 0;
+}
+
+div.sidebar::after,
+aside.sidebar::after,
+div.topic::after,
+div.admonition::after,
+blockquote::after {
+ display: block;
+ content: '';
+ clear: both;
+}
+
/* -- tables ---------------------------------------------------------------- */
table.docutils {
+ margin-top: 10px;
+ margin-bottom: 10px;
border: 0;
border-collapse: collapse;
}
@@ -416,32 +446,34 @@ table.citation td {
border-bottom: none;
}
-th > p:first-child,
-td > p:first-child {
+th > :first-child,
+td > :first-child {
margin-top: 0px;
}
-th > p:last-child,
-td > p:last-child {
+th > :last-child,
+td > :last-child {
margin-bottom: 0px;
}
/* -- figures --------------------------------------------------------------- */
-div.figure {
+div.figure, figure {
margin: 0.5em;
padding: 0.5em;
}
-div.figure p.caption {
+div.figure p.caption, figcaption {
padding: 0.3em;
}
-div.figure p.caption span.caption-number {
+div.figure p.caption span.caption-number,
+figcaption span.caption-number {
font-style: italic;
}
-div.figure p.caption span.caption-text {
+div.figure p.caption span.caption-text,
+figcaption span.caption-text {
}
/* -- field list styles ----------------------------------------------------- */
@@ -468,10 +500,71 @@ table.field-list td, table.field-list th {
/* -- hlist styles ---------------------------------------------------------- */
+table.hlist {
+ margin: 1em 0;
+}
+
table.hlist td {
vertical-align: top;
}
+/* -- object description styles --------------------------------------------- */
+
+.sig {
+ font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+}
+
+.sig-name, code.descname {
+ background-color: transparent;
+ font-weight: bold;
+}
+
+.sig-name {
+ font-size: 1.1em;
+}
+
+code.descname {
+ font-size: 1.2em;
+}
+
+.sig-prename, code.descclassname {
+ background-color: transparent;
+}
+
+.optional {
+ font-size: 1.3em;
+}
+
+.sig-paren {
+ font-size: larger;
+}
+
+.sig-param.n {
+ font-style: italic;
+}
+
+/* C++ specific styling */
+
+.sig-inline.c-texpr,
+.sig-inline.cpp-texpr {
+ font-family: unset;
+}
+
+.sig.c .k, .sig.c .kt,
+.sig.cpp .k, .sig.cpp .kt {
+ color: #0033B3;
+}
+
+.sig.c .m,
+.sig.cpp .m {
+ color: #1750EB;
+}
+
+.sig.c .s, .sig.c .sc,
+.sig.cpp .s, .sig.cpp .sc {
+ color: #067D17;
+}
+
/* -- other body styles ----------------------------------------------------- */
@@ -495,17 +588,37 @@ ol.upperroman {
list-style: upper-roman;
}
-li > p:first-child {
+:not(li) > ol > li:first-child > :first-child,
+:not(li) > ul > li:first-child > :first-child {
margin-top: 0px;
}
-li > p:last-child {
+:not(li) > ol > li:last-child > :last-child,
+:not(li) > ul > li:last-child > :last-child {
margin-bottom: 0px;
}
+ol.simple ol p,
+ol.simple ul p,
+ul.simple ol p,
+ul.simple ul p {
+ margin-top: 0;
+}
+
+ol.simple > li:not(:first-child) > p,
+ul.simple > li:not(:first-child) > p {
+ margin-top: 0;
+}
+
+ol.simple p,
+ul.simple p {
+ margin-bottom: 0;
+}
+
dl.footnote > dt,
dl.citation > dt {
float: left;
+ margin-right: 0.5em;
}
dl.footnote > dd,
@@ -546,7 +659,7 @@ dl {
margin-bottom: 15px;
}
-dd > p:first-child {
+dd > :first-child {
margin-top: 0px;
}
@@ -560,6 +673,11 @@ dd {
margin-left: 30px;
}
+dl > dd:last-child,
+dl > dd:last-child > :last-child {
+ margin-bottom: 0;
+}
+
dt:target, span.highlighted {
background-color: #fbe54e;
}
@@ -573,14 +691,6 @@ dl.glossary dt {
font-size: 1.1em;
}
-.optional {
- font-size: 1.3em;
-}
-
-.sig-paren {
- font-size: larger;
-}
-
.versionmodified {
font-style: italic;
}
@@ -637,6 +747,10 @@ pre {
overflow-y: hidden; /* fixes display issues on Chrome browsers */
}
+pre, div[class*="highlight-"] {
+ clear: both;
+}
+
span.pre {
-moz-hyphens: none;
-ms-hyphens: none;
@@ -644,22 +758,57 @@ span.pre {
hyphens: none;
}
+div[class*="highlight-"] {
+ margin: 1em 0;
+}
+
td.linenos pre {
- padding: 5px 0px;
border: 0;
background-color: transparent;
color: #aaa;
}
table.highlighttable {
- margin-left: 0.5em;
+ display: block;
+}
+
+table.highlighttable tbody {
+ display: block;
+}
+
+table.highlighttable tr {
+ display: flex;
}
table.highlighttable td {
- padding: 0 0.5em 0 0.5em;
+ margin: 0;
+ padding: 0;
+}
+
+table.highlighttable td.linenos {
+ padding-right: 0.5em;
+}
+
+table.highlighttable td.code {
+ flex: 1;
+ overflow: hidden;
+}
+
+.highlight .hll {
+ display: block;
+}
+
+div.highlight pre,
+table.highlighttable pre {
+ margin: 0;
+}
+
+div.code-block-caption + div {
+ margin-top: 0;
}
div.code-block-caption {
+ margin-top: 1em;
padding: 2px 5px;
font-size: small;
}
@@ -668,12 +817,14 @@ div.code-block-caption code {
background-color: transparent;
}
-div.code-block-caption + div > div.highlight > pre {
- margin-top: 0;
-}
-
-div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */
- user-select: none;
+table.highlighttable td.linenos,
+span.linenos,
+div.highlight span.gp { /* gp: Generic.Prompt */
+ user-select: none;
+ -webkit-user-select: text; /* Safari fallback only */
+ -webkit-user-select: none; /* Chrome/Safari */
+ -moz-user-select: none; /* Firefox */
+ -ms-user-select: none; /* IE10+ */
}
div.code-block-caption span.caption-number {
@@ -685,21 +836,7 @@ div.code-block-caption span.caption-text {
}
div.literal-block-wrapper {
- padding: 1em 1em 0;
-}
-
-div.literal-block-wrapper div.highlight {
- margin: 0;
-}
-
-code.descname {
- background-color: transparent;
- font-weight: bold;
- font-size: 1.2em;
-}
-
-code.descclassname {
- background-color: transparent;
+ margin: 1em 0;
}
code.xref, a code {
@@ -740,8 +877,7 @@ span.eqno {
}
span.eqno a.headerlink {
- position: relative;
- left: 0px;
+ position: absolute;
z-index: 1;
}
diff --git a/docs/build/html/_static/doctools.js b/docs/build/html/_static/doctools.js
index daccd20..8cbf1b1 100644
--- a/docs/build/html/_static/doctools.js
+++ b/docs/build/html/_static/doctools.js
@@ -4,7 +4,7 @@
*
* Sphinx JavaScript utilities for all documentation.
*
- * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -29,9 +29,14 @@ if (!window.console || !console.firebug) {
/**
* small helper function to urldecode strings
+ *
+ * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
*/
jQuery.urldecode = function(x) {
- return decodeURIComponent(x).replace(/\+/g, ' ');
+ if (!x) {
+ return x
+ }
+ return decodeURIComponent(x.replace(/\+/g, ' '));
};
/**
@@ -285,9 +290,10 @@ var Documentation = {
initOnKeyListeners: function() {
$(document).keydown(function(event) {
var activeElementType = document.activeElement.tagName;
- // don't navigate when in search box or textarea
+ // don't navigate when in search box, textarea, dropdown or button
if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT'
- && !event.altKey && !event.ctrlKey && !event.metaKey && !event.shiftKey) {
+ && activeElementType !== 'BUTTON' && !event.altKey && !event.ctrlKey && !event.metaKey
+ && !event.shiftKey) {
switch (event.keyCode) {
case 37: // left
var prevHref = $('link[rel="prev"]').prop('href');
@@ -295,12 +301,14 @@ var Documentation = {
window.location.href = prevHref;
return false;
}
+ break;
case 39: // right
var nextHref = $('link[rel="next"]').prop('href');
if (nextHref) {
window.location.href = nextHref;
return false;
}
+ break;
}
}
});
diff --git a/docs/build/html/_static/documentation_options.js b/docs/build/html/_static/documentation_options.js
index 4790c4d..2fa8c97 100644
--- a/docs/build/html/_static/documentation_options.js
+++ b/docs/build/html/_static/documentation_options.js
@@ -5,6 +5,7 @@ var DOCUMENTATION_OPTIONS = {
COLLAPSE_INDEX: false,
BUILDER: 'html',
FILE_SUFFIX: '.html',
+ LINK_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: '.txt',
NAVIGATION_WITH_KEYS: false
diff --git a/docs/build/html/_static/jquery-3.4.1.js b/docs/build/html/_static/jquery-3.4.1.js
deleted file mode 100644
index 773ad95..0000000
--- a/docs/build/html/_static/jquery-3.4.1.js
+++ /dev/null
@@ -1,10598 +0,0 @@
-/*!
- * jQuery JavaScript Library v3.4.1
- * https://jquery.com/
- *
- * Includes Sizzle.js
- * https://sizzlejs.com/
- *
- * Copyright JS Foundation and other contributors
- * Released under the MIT license
- * https://jquery.org/license
- *
- * Date: 2019-05-01T21:04Z
- */
-( function( global, factory ) {
-
- "use strict";
-
- if ( typeof module === "object" && typeof module.exports === "object" ) {
-
- // For CommonJS and CommonJS-like environments where a proper `window`
- // is present, execute the factory and get jQuery.
- // For environments that do not have a `window` with a `document`
- // (such as Node.js), expose a factory as module.exports.
- // This accentuates the need for the creation of a real `window`.
- // e.g. var jQuery = require("jquery")(window);
- // See ticket #14549 for more info.
- module.exports = global.document ?
- factory( global, true ) :
- function( w ) {
- if ( !w.document ) {
- throw new Error( "jQuery requires a window with a document" );
- }
- return factory( w );
- };
- } else {
- factory( global );
- }
-
-// Pass this if window is not defined yet
-} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) {
-
-// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1
-// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode
-// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common
-// enough that all such attempts are guarded in a try block.
-"use strict";
-
-var arr = [];
-
-var document = window.document;
-
-var getProto = Object.getPrototypeOf;
-
-var slice = arr.slice;
-
-var concat = arr.concat;
-
-var push = arr.push;
-
-var indexOf = arr.indexOf;
-
-var class2type = {};
-
-var toString = class2type.toString;
-
-var hasOwn = class2type.hasOwnProperty;
-
-var fnToString = hasOwn.toString;
-
-var ObjectFunctionString = fnToString.call( Object );
-
-var support = {};
-
-var isFunction = function isFunction( obj ) {
-
- // Support: Chrome <=57, Firefox <=52
- // In some browsers, typeof returns "function" for HTML