From ee1fa8a8d70dc4beba99b424d284ccccf5de1985 Mon Sep 17 00:00:00 2001
From: Atika-Syeda
Date: Fri, 21 Jul 2023 17:15:30 -0400
Subject: [PATCH 1/2] Update readthedocs
---
docs/conf.py | 2 +-
docs/gui.rst | 21 ++++++++
docs/index.rst | 13 +++--
docs/installation.md | 2 +-
docs/outputs.rst | 66 ++++++++++++++++----------
docs/pose_tracking_cli_tutorial.md | 3 --
docs/pose_tracking_gui_tutorial.md | 2 +-
docs/pose_tracking_gui_tutorial.rst | 15 +++---
docs/roi_proc.rst | 4 +-
facemap/__init__.py | 1 +
facemap/__main__.py | 6 ++-
facemap/gui/help_windows.py | 5 +-
facemap/gui/ops_user.npy | Bin 401 -> 399 bytes
facemap/pose/pose_helper_functions.py | 3 --
facemap/version.py | 21 ++++++++
15 files changed, 112 insertions(+), 52 deletions(-)
create mode 100644 docs/gui.rst
delete mode 100644 docs/pose_tracking_cli_tutorial.md
create mode 100644 facemap/version.py
diff --git a/docs/conf.py b/docs/conf.py
index 23f37cb..77c65dc 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -24,7 +24,7 @@
author = "Carsen Stringer & Atika Syeda & Renee Tung"
# The full version, including alpha/beta/rc tags
-release = "1.0.0-rc1"
+release = "1.0.1"
# -- General configuration ---------------------------------------------------
diff --git a/docs/gui.rst b/docs/gui.rst
new file mode 100644
index 0000000..91ede91
--- /dev/null
+++ b/docs/gui.rst
@@ -0,0 +1,21 @@
+GUI
+-----
+
+Starting the GUI
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The quickest way to open the GUI is from a command line terminal:
+::
+
+ python -m facemap
+
+Using the GUI
+~~~~~~~~~~~~~~~~~~~~~~~
+The GUI can be used for processing keypoints and computing the SVD of mouse behavioral videos. It can also be used to predict neural activity from the behavioral data. For more details on each feature, see the following tutorials:
+
+.. toctree::
+ :maxdepth: 3
+
+ pose_tracking_gui_tutorial
+ roi_proc
+ neural_activity_prediction_tutorial
diff --git a/docs/index.rst b/docs/index.rst
index 7fbc5cc..c511fd6 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,17 +1,22 @@
-Facemap
+.. figure:: https://github.com/MouseLand/facemap/blob/main/facemap/mouse.png
+   :alt: facemap
+
+Facemap
===================================
+
+Facemap is a framework for predicting neural activity from mouse orofacial movements. It includes a pose estimation model for tracking distinct keypoints on the mouse face, a neural network model for predicting neural activity from the pose estimates, and can also be used to compute the singular value decomposition (SVD) of behavioral videos.
+For more details, please see our `paper `__ and `twitter thread `__.
+
.. toctree::
:maxdepth: 3
:caption: Basics:
installation
+ gui
inputs
- pose_tracking_gui_tutorial
- roi_proc
outputs
- neural_activity_prediction_tutorial
.. toctree::
:caption: Tutorials
diff --git a/docs/installation.md b/docs/installation.md
index 4598929..6fd565a 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -1,6 +1,6 @@
# Installation (Python)
-This package only supports python 3. We recommend installing python 3 with **[Anaconda](https://www.anaconda.com/download/)**.s
+This package only supports python 3. We recommend installing python 3 with **[Anaconda](https://www.anaconda.com/download/)**.
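+
+For example, a fresh conda environment can be created and activated as follows (the environment name and python version shown are assumptions - check the repository README for the currently supported versions):
+
+    conda create -n facemap python=3.8
+    conda activate facemap
+    pip install facemap
+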
## Common installation issues
diff --git a/docs/outputs.rst b/docs/outputs.rst
index cae6fd0..8925e73 100644
--- a/docs/outputs.rst
+++ b/docs/outputs.rst
@@ -2,21 +2,14 @@ Outputs
=======================
ROI and SVD processing
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~
+SVD processing saves two outputs: a \*.npy file and a \*.mat file. The output files contain the following variables:
+
+- **filenames**: a 2D list of video filenames - each inner list contains the videos recorded simultaneously, while sequentially recorded videos are stored in separate lists
-Proccessed output
-~~~~~~~~~~~~~~~~~
+- **Ly**, **Lx**: list of frame height in y-dim (Ly) and width in x-dim (Lx) for each video taken simultaneously
-The GUIs create one file for all videos (saved in current folder), the
-npy file has name “videofile_proc.npy” and the mat file has name
-“videofile_proc.mat”.
-
-- **filenames**: list of lists of video filenames - each list are the videos taken simultaneously
-
-- **Ly**, **Lx**: list of number of pixels in Y (Ly) and X (Lx) for each video taken simultaneously
-
-- **sbin**: spatial bin size for motion SVDs
+- **sbin**: spatial bin size for SVDs
- **Lybin**, **Lxbin**: list of number of pixels binned by sbin in Y (Ly) and X (Lx) for each video taken simultaneously
@@ -24,27 +17,27 @@ npy file has name “videofile_proc.npy” and the mat file has name
- **LYbin**, **LXbin**: full-size of all videos embedded in rectangle (binned)
-- **fullSVD**: whether or not “multivideo SVD” is computed
+- **fullSVD**: bool flag indicating whether “multivideo SVD” is computed
-- **save_mat**: whether or not to save proc as `\*.mat` file
+- **save_mat**: bool flag indicating whether to save proc as `\*.mat` file
- **avgframe**: list of average frames for each video from a subset of frames (binned by sbin)
-- **avgframe_reshape**: average frame reshaped to be y-pixels x x-pixels
+- **avgframe_reshape**: average frame reshaped to size y-pixels by x-pixels
-- **avgmotion**: list of average motions for each video from a subset of frames (binned by sbin)
+- **avgmotion**: list of average motion computed for each video from a subset of frames (binned by sbin)
-- **avgmotion_reshape**: average motion reshaped to be y-pixels x x-pixels
+- **avgmotion_reshape**: average motion reshaped to size y-pixels by x-pixels
-- **iframes**: array containing number of frames in each consecutive video
+- **iframes**: an array containing the number of frames in each consecutive video
- **motion**: list of absolute motion energies across time - first is “multivideo” motion energy (empty if not computed)
-- **motSVD**: list of motion SVDs - first is “multivideo SVD” (empty if not computed) - each is nframes x components
+- **motSVD**: list of motion SVDs - first is “multivideo SVD” (empty if not computed) - each is of size number of frames x number of components (500)
- **motMask**: list of motion masks for each motion SVD - each motMask is pixels x components
-- **motMask_reshape**: motion masks reshaped to be y-pixels x x-pixels x components
+- **motMask_reshape**: motion masks reshaped to size y-pixels by x-pixels by components
- **motSv**: array containing singular values for motSVD
@@ -81,15 +74,38 @@ npy file has name “videofile_proc.npy” and the mat file has name
Loading outputs
''''''''''''''''''''
-Note this is a dict, e.g. to load in python:
+The saved \*.npy file is a dict which can be loaded in python as follows:
::
import numpy as np
- proc = np.load('cam1_proc.npy', allow_pickle=True).item()
+ proc = np.load('filename_proc.npy', allow_pickle=True).item()
print(proc.keys())
motion = proc['motion']
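+ motSVD = proc['motSVD'][1]  # index 0 is the "multivideo SVD" (empty if not computed)
+ print(motSVD.shape)  # (number of frames, number of components)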
-These \*_proc.npy\* files can be loaded into the GUI (and will
-automatically be loaded after processing). The checkboxes in the lower
-left allow you to view different traces from the processing.
+These \*_proc.npy\* files can be loaded in the GUI (and are
+automatically loaded after processing). The checkboxes on the lower
+left panel of the GUI can be used to toggle display of different traces/variables.
+
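+The same variables can be read from the \*.mat file (saved when **save_mat**
+is set) using scipy. A minimal sketch (the filename here is hypothetical):
+
+::
+
+    from scipy.io import loadmat
+
+    proc = loadmat('filename_proc.mat')
+    print(proc.keys())
+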
+Keypoints processing
+~~~~~~~~~~~~~~~~~~~~
+
+Keypoints processing saves two outputs: a \*.h5 and a \*_metadata.pkl file.
+ - \*.h5 file contains keypoints stored as a 3D array of shape (3, number of bodyparts, number of frames). The first dimension of size 3 is in the order: (x, y, likelihood). For more details on using/loading the \*.h5 file in python see this `tutorial `__ or the loading sketch after the metadata description below.
+ - \*_metadata.pkl file: contains a dictionary consisting of the following variables:
+ - batch_size: batch size used for inference
+ - image_size: frame size
+ - bbox: bounding box for cropping the video [x1, x2, y1, y2]
+ - total_frames: number of frames
+ - bodyparts: names of bodyparts
+ - inference_speed: processing speed
+ To load the pkl file in python, use the following code:
+
+ ::
+
+ import pickle
+ with open('filename_metadata.pkl', 'rb') as f:
+ metadata = pickle.load(f)
+ print(metadata.keys())
+ print(metadata['bodyparts'])
+
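+The \*.h5 file can also be inspected directly with h5py. A minimal sketch
+(listing the file contents is the easiest way to confirm the stored
+group/dataset names):
+
+::
+
+    import h5py
+
+    with h5py.File('filename.h5', 'r') as f:
+        f.visit(print)  # list all groups/datasets stored in the file
+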
diff --git a/docs/pose_tracking_cli_tutorial.md b/docs/pose_tracking_cli_tutorial.md
deleted file mode 100644
index d99b545..0000000
--- a/docs/pose_tracking_cli_tutorial.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Pose tracking **(CLI)**
-
-Coming soon ...
\ No newline at end of file
diff --git a/docs/pose_tracking_gui_tutorial.md b/docs/pose_tracking_gui_tutorial.md
index 61c02dd..8a1c159 100644
--- a/docs/pose_tracking_gui_tutorial.md
+++ b/docs/pose_tracking_gui_tutorial.md
@@ -24,7 +24,7 @@ Follow the steps below to generate keypoints for your videos:
- Check `Keypoints` for pose tracking.
- Click `process`.
3. Set ROI/bounding box for face region
- - A dialog box for selecting a bounding box for the face will appear. Drag the red rectangle to select region of interest on the frame where the keypoints will be tracked. Please ensure that the bouding box is focused on the face where all the keypoints will be visible. See example frames [here](figs/mouse_views.png). If a 'Face (pose)' ROI has already been added then this step will be skipped.
+ - A dialog box for selecting a bounding box for the face will appear. Drag the red rectangle to select the region of interest on the frame where the keypoints will be tracked. Please ensure that the bounding box is focused on the face where all the keypoints will be visible. See example frames [here](https://github.com/MouseLand/facemap/blob/main/figs/mouse_views.png). If a 'Face (pose)' ROI has already been added then this step will be skipped.
- Click `Done` to process video. Alternatively, click `Skip` to use the entire frame region. Monitor progress bar at the bottom of the window for updates.
4. View keypoints
- Keypoints will be automatically loaded after processing.
diff --git a/docs/pose_tracking_gui_tutorial.rst b/docs/pose_tracking_gui_tutorial.rst
index f75b3eb..d9e320f 100644
--- a/docs/pose_tracking_gui_tutorial.rst
+++ b/docs/pose_tracking_gui_tutorial.rst
@@ -1,5 +1,5 @@
-Pose tracking **(GUI)** :mouse:
-===============================
+Pose tracking **(GUI)**
+========================
The latest python version is integrated with Facemap network for
tracking 14 distinct keypoints on mouse face and an additional point for
@@ -39,6 +39,7 @@ Follow the steps below to generate keypoints for your videos:
- Check ``Keypoints`` for pose tracking.
- Click ``process``.
+ - Note: The first time Facemap processes keypoints, it downloads the latest available trained model weights from our website.
3. Set ROI/bounding box for face region
@@ -46,7 +47,7 @@ Follow the steps below to generate keypoints for your videos:
appear. Drag the red rectangle to select region of interest on the
frame where the keypoints will be tracked. Please ensure that the
bounding box is focused on the face where all the keypoints will be
- visible. See example frames `here `__. If a
+ visible. See example frames `here `__. If a
‘Face (pose)’ ROI has already been added then this step will be
skipped.
- Click ``Done`` to process video. Alternatively, click ``Skip`` to
@@ -62,7 +63,7 @@ Follow the steps below to generate keypoints for your videos:
Visualize keypoints
-------------------
-To load keypoints (*.h5) for a video generated using Facemap or other
+To load keypoints (\*.h5) for a video generated using Facemap or other
software in the same format (such as DeepLabCut and SLEAP), follow the
steps below:
@@ -75,7 +76,7 @@ steps below:
- Select ``Pose`` from the menu bar
- Select ``Load keypoints``
- - Select the keypoints (*.h5) file
+ - Select the keypoints (\*.h5) file
3. View keypoints
@@ -85,10 +86,6 @@ steps below:
keypoints with lower confidence estimates. Higher threshold will
show keypoints with higher confidence estimates.
-Note: this feature is currently only supported for single video. Please
-see `CLI instructions `__ for viewing
-keypoints for multiple videos.
-
Finetune model to refine keypoints for a video
----------------------------------------------
diff --git a/docs/roi_proc.rst b/docs/roi_proc.rst
index b809860..21222bb 100644
--- a/docs/roi_proc.rst
+++ b/docs/roi_proc.rst
@@ -1,5 +1,5 @@
-ROI and SVD processing
-==============================
+SVD processing and ROIs
+========================
Choose a type of ROI to add and then click “add ROI” to add it to the
view. The pixels in the ROI will show up in the right window (with
diff --git a/facemap/__init__.py b/facemap/__init__.py
index 14371ca..bf670cd 100644
--- a/facemap/__init__.py
+++ b/facemap/__init__.py
@@ -2,3 +2,4 @@
Copyright © 2023 Howard Hughes Medical Institute, Authored by Carsen Stringer and Atika Syeda.
"""
name = "facemap"
+from facemap.version import version, version_str
\ No newline at end of file
diff --git a/facemap/__main__.py b/facemap/__main__.py
index c551135..9e55d3b 100644
--- a/facemap/__main__.py
+++ b/facemap/__main__.py
@@ -9,7 +9,7 @@
from facemap import process
from facemap.gui import gui
-
+from facemap import version_str
def tic():
return time.time()
@@ -25,6 +25,7 @@ def main():
if __name__ == "__main__":
+
parser = argparse.ArgumentParser(description="Movie files")
parser.add_argument("--ops", default=[], type=str, help="options")
parser.add_argument(
@@ -88,6 +89,9 @@ def main():
parser.set_defaults(autoload_proc=True)
args = parser.parse_args()
+
+ print(version_str)
+
ops = {}
if len(args.ops) > 0:
ops = np.load(args.ops)
diff --git a/facemap/gui/help_windows.py b/facemap/gui/help_windows.py
index d87c946..b976312 100644
--- a/facemap/gui/help_windows.py
+++ b/facemap/gui/help_windows.py
@@ -20,6 +20,7 @@
QVBoxLayout,
QWidget,
)
+from ..version import version_str
class MainWindowHelp(QDialog):
@@ -165,12 +166,12 @@ def __init__(self, parent=None, window_size=None):
License: GPLv3
- Version: 0.2.0
+ Version: {version}
Visit our github page for more information.
- """
+ """.format(version=version_str)
text = QLabel(text, self)
text.setStyleSheet(
"font-size: 12pt; font-family: Arial; color: white; text-align: center; "
diff --git a/facemap/gui/ops_user.npy b/facemap/gui/ops_user.npy
index aa077280cef29a5c845993cf905b9133480fdb23..dcebfb2529705a7f2527dfc2866a558fc872a82d 100755
GIT binary patch
delta 75
zcmbQp+|RthhtXM_fq_9kBR@A)zqq6*GcP?SF-hMgwK%&Zzo1Ysf*mNER+^I&9OhCe
a)Dgh}

From: Atika-Syeda
Date: Fri, 21 Jul 2023 17:41:37 -0400
Subject: [PATCH 2/2] Update readthedocs with neural pred output
---
docs/inputs.rst | 4 ++--
docs/outputs.rst | 13 ++++++++++++-
docs/pose_tracking_gui_tutorial.rst | 12 ++----------
3 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/docs/inputs.rst b/docs/inputs.rst
index 3dabf46..82a29a1 100644
--- a/docs/inputs.rst
+++ b/docs/inputs.rst
@@ -43,7 +43,7 @@ Load a video or a set of videos and draw your ROIs and choose your processing se
Data acquisition info
~~~~~~~~~~~~~~~~~~~~~~~~~
-IR ILLUMINATION
+IR illumination
---------------------
For recording in darkness we use `IR
@@ -67,7 +67,7 @@ tube `__.
-CAMERAS
+Cameras
---------------------
We use `ptgrey
diff --git a/docs/outputs.rst b/docs/outputs.rst
index 8925e73..c9ecb94 100644
--- a/docs/outputs.rst
+++ b/docs/outputs.rst
@@ -1,5 +1,5 @@
Outputs
-=======================
+========
ROI and SVD processing
~~~~~~~~~~~~~~~~~~~~~~~
@@ -109,3 +109,14 @@ Keypoints processing saves two outputs: a \*.h5 and a \*_metadata.pkl file.
print(metadata.keys())
print(metadata['bodyparts'])
+
+Neural activity prediction output
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The output of neural activity prediction is saved in a \*.npy file and optionally in a \*.mat file. The output file contains a dictionary with the following keys:
+
+- predictions: a 2D array containing the predicted neural activity of shape (number of features x time)
+- test_indices: a list of indices indicating sections of data used as test data for computing variance explained by the model
+- variance_explained: variance explained by the model for test data
+- plot_extent: extent of the plot used for plotting the predicted neural activity in the order [x1, y1, x2, y2]
+
+
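+The saved \*.npy file can be loaded in python in the same way as the other
+\*.npy outputs. A minimal sketch (the filename here is hypothetical):
+
+::
+
+    import numpy as np
+
+    output = np.load('filename_neural_predictions.npy', allow_pickle=True).item()
+    predictions = output['predictions']  # 2D array (number of features x time)
+    print(output['variance_explained'])
+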
diff --git a/docs/pose_tracking_gui_tutorial.rst b/docs/pose_tracking_gui_tutorial.rst
index d9e320f..fa2f1a2 100644
--- a/docs/pose_tracking_gui_tutorial.rst
+++ b/docs/pose_tracking_gui_tutorial.rst
@@ -3,16 +3,8 @@ Pose tracking **(GUI)**
The latest python version is integrated with Facemap network for
tracking 14 distinct keypoints on mouse face and an additional point for
-tracking paw. The keypoints can be tracked from different camera views
-(some examples shown below).
+tracking the paw. The keypoints can be tracked from different camera views (see `examples `__).
-.. raw:: html
-
-
-
-.. raw:: html
-
-
Generate keypoints
------------------
@@ -33,7 +25,7 @@ Follow the steps below to generate keypoints for your videos:
- Use the file menu to ``Set output folder``.
- The processed keypoints (``*.h5``) and metadata (``*.pkl``) will
be saved in the selected output folder or folder containing the
- video (default).
+ video (by default).
2. Process video(s)