From 8fd6591220326a3b2dd868b552ae257fa83ef415 Mon Sep 17 00:00:00 2001 From: Talmo Pereira Date: Tue, 27 Feb 2024 12:07:04 +0000 Subject: [PATCH 1/6] Add tests for using Image in TrainingFrame.source_image --- src/pynwb/ndx_pose/testing/mock/pose.py | 11 ++++- src/pynwb/tests/unit/test_pose.py | 57 +++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/src/pynwb/ndx_pose/testing/mock/pose.py b/src/pynwb/ndx_pose/testing/mock/pose.py index 051249f..13d0bfc 100644 --- a/src/pynwb/ndx_pose/testing/mock/pose.py +++ b/src/pynwb/ndx_pose/testing/mock/pose.py @@ -2,7 +2,7 @@ import numpy as np from pynwb import NWBFile -from pynwb.image import ImageSeries +from pynwb.image import ImageSeries, Image, RGBImage from pynwb.testing.mock.utils import name_generator from pynwb.testing.mock.device import mock_Device @@ -166,6 +166,11 @@ def mock_source_video( ) return source_video +def mock_source_frame( + *, + name: Optional[str] = None, +): + return RGBImage(name=name, data=np.random.rand(640, 480, 3).astype("uint8")) def mock_TrainingFrame( *, @@ -173,13 +178,15 @@ def mock_TrainingFrame( annotator: Optional[str] = "Awesome Possum", skeleton_instance: SkeletonInstance = None, source_video: ImageSeries = None, + source_frame: Image = None, source_video_frame_index: np.uint = np.uint(10), ): training_frame = TrainingFrame( name=name or name_generator("TrainingFrame"), annotator=annotator, skeleton_instance=skeleton_instance or mock_SkeletonInstance(), - source_video=source_video or mock_source_video(), + source_video=source_video or (mock_source_video() if source_frame is None else None), + source_frame=source_frame, source_video_frame_index=source_video_frame_index, ) return training_frame diff --git a/src/pynwb/tests/unit/test_pose.py b/src/pynwb/tests/unit/test_pose.py index 59d1dae..ef34618 100644 --- a/src/pynwb/tests/unit/test_pose.py +++ b/src/pynwb/tests/unit/test_pose.py @@ -4,6 +4,7 @@ from pynwb import NWBFile from pynwb.device import Device from pynwb.testing import TestCase +from pynwb.image import RGBImage from ndx_pose import ( PoseEstimationSeries, @@ -19,6 +20,7 @@ from ndx_pose.testing.mock.pose import ( mock_PoseEstimationSeries, mock_source_video, + mock_source_frame, mock_Skeleton, mock_SkeletonInstance, mock_TrainingFrame, @@ -255,6 +257,23 @@ def test_constructor(self): self.assertEqual(training_frame.source_video_frame_index, np.uint(0)) +class TestTrainingFrameImage(TestCase): + def test_constructor(self): + skeleton_instance = mock_SkeletonInstance() + source_frame = mock_source_frame(name="frame0_image") + training_frame = TrainingFrame( + name="frame0", + annotator="Awesome Possum", + skeleton_instance=skeleton_instance, + source_frame=source_frame, + source_video_frame_index=np.uint(0), + ) + self.assertEqual(training_frame.name, "frame0") + self.assertEqual(training_frame.annotator, "Awesome Possum") + self.assertIs(training_frame.skeleton_instance, skeleton_instance) + self.assertIs(training_frame.source_frame, source_frame) + self.assertEqual(training_frame.source_video_frame_index, np.uint(0)) + class TestPoseTraining(TestCase): def test_constructor(self): skeleton1 = mock_Skeleton(name="subject1") @@ -292,3 +311,41 @@ def test_constructor(self): self.assertIs(pose_training.training_frames.training_frames["skeleton2_frame10"], sk2_training_frame) self.assertEqual(len(pose_training.source_videos.image_series), 1) self.assertIs(pose_training.source_videos.image_series["source_video"], source_video) + + +class 
TestPoseTrainingImages(TestCase): + def test_constructor(self): + skeleton1 = mock_Skeleton(name="subject1") + skeleton2 = mock_Skeleton(name="subject2") + source_frame_10 = mock_source_frame(name="source_frame_10") + sk1_instance10 = mock_SkeletonInstance(id=np.uint(10), skeleton=skeleton1) + sk1_training_frame = mock_TrainingFrame( + name="frame10", + skeleton_instance=sk1_instance10, + source_frame=source_frame_10, + source_video_frame_index=np.uint(10), + ) + + source_frame_11 = mock_source_frame(name="source_frame_11") + sk2_instance11 = mock_SkeletonInstance(id=np.uint(11), skeleton=skeleton2) + sk2_training_frame = mock_TrainingFrame( + name="frame11", + skeleton_instance=sk2_instance11, + source_frame=source_frame_11, + source_video_frame_index=np.uint(11), + ) + + skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) + training_frames = TrainingFrames(training_frames=[sk1_training_frame, sk2_training_frame]) + + pose_training = PoseTraining( + skeletons=skeletons, + training_frames=training_frames, + ) + self.assertEqual(len(pose_training.skeletons.skeletons), 2) + self.assertIs(pose_training.skeletons.skeletons["subject1"], skeleton1) + self.assertIs(pose_training.skeletons.skeletons["subject2"], skeleton2) + self.assertEqual(len(pose_training.training_frames.training_frames), 2) + self.assertIs(pose_training.training_frames.training_frames["frame10"], sk1_training_frame) + self.assertIs(pose_training.training_frames.training_frames["frame11"], sk2_training_frame) + self.assertIsNone(pose_training.source_videos) \ No newline at end of file From 1cd136356c7b00914a3fde2f91305f736fbfb7d8 Mon Sep 17 00:00:00 2001 From: Talmo Pereira Date: Wed, 28 Feb 2024 05:58:38 -0800 Subject: [PATCH 2/6] Add SkeletonInstances in TrainingFrame (#3) * Add `SkeletonInstances` class for multiple instances * Merge changes from main * Start changing tests * Add `SkeletonInstances` to `TrainingFrame` * Add name to distinguish instances * Add `SkeletonInstances` to tests --------- Co-authored-by: eberrigan --- spec/ndx-pose.extensions.yaml | 20 ++- src/pynwb/ndx_pose/__init__.py | 5 +- src/pynwb/ndx_pose/pose.py | 75 ++++++++--- src/pynwb/ndx_pose/testing/mock/pose.py | 54 ++++++-- src/pynwb/tests/integration/hdf5/test_pose.py | 92 +++++++++++--- src/pynwb/tests/test_example_usage.py | 43 +++++-- src/pynwb/tests/unit/test_pose.py | 117 +++++++++++++----- src/spec/create_extension_spec.py | 37 ++++-- 8 files changed, 346 insertions(+), 97 deletions(-) diff --git a/spec/ndx-pose.extensions.yaml b/spec/ndx-pose.extensions.yaml index 4168ab2..aec3636 100644 --- a/spec/ndx-pose.extensions.yaml +++ b/spec/ndx-pose.extensions.yaml @@ -142,9 +142,9 @@ groups: then `source_video` is required. required: false groups: - - name: skeleton_instance - neurodata_type_inc: SkeletonInstance - doc: Position data for a single instance of a skeleton in a single training frame. + - name: skeleton_instances + neurodata_type_inc: SkeletonInstances + doc: Position data for all instances of a skeleton in a single training frame. links: - name: source_video target_type: ImageSeries @@ -159,8 +159,8 @@ groups: - neurodata_type_def: SkeletonInstance neurodata_type_inc: NWBDataInterface default_name: skeleton_instance - doc: Group that holds ground-truth pose data for a single instance of a skeleton - in a single frame. This is meant to be used within a TrainingFrame. + doc: 'Group that holds ground-truth pose data for a single instance of a skeleton + in a single frame. 
' attributes: - name: id dtype: uint8 @@ -200,6 +200,16 @@ groups: - neurodata_type_inc: TrainingFrame doc: Ground-truth position data for all instances of a skeleton in a single frame. quantity: '*' +- neurodata_type_def: SkeletonInstances + neurodata_type_inc: NWBDataInterface + default_name: skeleton_instances + doc: Organizational group to hold skeleton instances.This is meant to be used within + a TrainingFrame. + groups: + - neurodata_type_inc: SkeletonInstance + doc: Ground-truth position data for a single instance of a skeleton in a single + training frame. + quantity: '*' - neurodata_type_def: SourceVideos neurodata_type_inc: NWBDataInterface default_name: source_videos diff --git a/src/pynwb/ndx_pose/__init__.py b/src/pynwb/ndx_pose/__init__.py index 1e9a53e..414bf3b 100644 --- a/src/pynwb/ndx_pose/__init__.py +++ b/src/pynwb/ndx_pose/__init__.py @@ -2,7 +2,9 @@ from pynwb import load_namespaces, get_class # Set path of the namespace.yaml file to the expected install location -ndx_pose_specpath = os.path.join(os.path.dirname(__file__), "spec", "ndx-pose.namespace.yaml") +ndx_pose_specpath = os.path.join( + os.path.dirname(__file__), "spec", "ndx-pose.namespace.yaml" +) # If the extension has not been installed yet but we are running directly from # the git repo @@ -30,6 +32,7 @@ TrainingFrame, TrainingFrames, SkeletonInstance, + SkeletonInstances, SourceVideos, PoseTraining, ) # noqa: E402, F401 diff --git a/src/pynwb/ndx_pose/pose.py b/src/pynwb/ndx_pose/pose.py index 03a1887..65390cc 100644 --- a/src/pynwb/ndx_pose/pose.py +++ b/src/pynwb/ndx_pose/pose.py @@ -10,6 +10,7 @@ Skeleton = get_class("Skeleton", "ndx-pose") Skeletons = get_class("Skeletons", "ndx-pose") SkeletonInstance = get_class("SkeletonInstance", "ndx-pose") +SkeletonInstances = get_class("SkeletonInstances", "ndx-pose") TrainingFrame = get_class("TrainingFrame", "ndx-pose") TrainingFrames = get_class("TrainingFrames", "ndx-pose") SourceVideos = get_class("SourceVideos", "ndx-pose") @@ -30,7 +31,9 @@ class PoseEstimationSeries(SpatialSeries): { "name": "name", "type": str, - "doc": ("Name of this PoseEstimationSeries, usually the name of a body part."), + "doc": ( + "Name of this PoseEstimationSeries, usually the name of a body part." + ), }, { "name": "data", @@ -47,7 +50,9 @@ class PoseEstimationSeries(SpatialSeries): "name": "confidence", "type": ("array_data", "data"), "shape": (None,), - "doc": ("Confidence or likelihood of the estimated positions, scaled to be between 0 and 1."), + "doc": ( + "Confidence or likelihood of the estimated positions, scaled to be between 0 and 1." + ), "default": None, }, { @@ -64,7 +69,8 @@ class PoseEstimationSeries(SpatialSeries): "name": "confidence_definition", "type": str, "doc": ( - "Description of how the confidence was computed, e.g., " "'Softmax output of the deep neural network'." + "Description of how the confidence was computed, e.g., " + "'Softmax output of the deep neural network'." 
), "default": None, }, @@ -85,7 +91,9 @@ class PoseEstimationSeries(SpatialSeries): ) def __init__(self, **kwargs): """Construct a new PoseEstimationSeries representing pose estimates for a particular body part.""" - confidence, confidence_definition = popargs("confidence", "confidence_definition", kwargs) + confidence, confidence_definition = popargs( + "confidence", "confidence_definition", kwargs + ) super().__init__(**kwargs) self.confidence = confidence self.confidence_definition = confidence_definition @@ -160,7 +168,9 @@ class PoseEstimation(MultiContainerInterface): "name": "labeled_videos", "type": ("array_data", "data"), "shape": (None,), - "doc": ("Paths to the labeled video files. The number of files should equal the number of camera devices."), + "doc": ( + "Paths to the labeled video files. The number of files should equal the number of camera devices." + ), "default": None, }, { @@ -185,7 +195,9 @@ class PoseEstimation(MultiContainerInterface): { "name": "source_software", "type": str, - "doc": ("Name of the software tool used. Specifying the version attribute is strongly encouraged."), + "doc": ( + "Name of the software tool used. Specifying the version attribute is strongly encouraged." + ), "default": None, }, { @@ -226,11 +238,14 @@ def __init__(self, **kwargs): nodes, edges, skeleton = popargs("nodes", "edges", "skeleton", kwargs) if nodes is not None or edges is not None: if skeleton is not None: - raise ValueError("Cannot specify both 'nodes' and 'edges' and 'skeleton'.") + raise ValueError( + "Cannot specify both 'nodes' and 'edges' and 'skeleton'." + ) skeleton = Skeleton(name="subject", nodes=nodes, edges=edges) warnings.warn( "The 'nodes' and 'edges' arguments are deprecated. Please use the 'skeleton' argument instead.", - DeprecationWarning, stacklevel=2 + DeprecationWarning, + stacklevel=2, ) # devices must be added to the NWBFile before being linked to from a PoseEstimation object. @@ -243,10 +258,16 @@ def __init__(self, **kwargs): "All devices linked to from a PoseEstimation object must be added to the NWBFile first." 
) - pose_estimation_series, description = popargs("pose_estimation_series", "description", kwargs) - original_videos, labeled_videos = popargs("original_videos", "labeled_videos", kwargs) + pose_estimation_series, description = popargs( + "pose_estimation_series", "description", kwargs + ) + original_videos, labeled_videos = popargs( + "original_videos", "labeled_videos", kwargs + ) dimensions, scorer = popargs("dimensions", "scorer", kwargs) - source_software, source_software_version = popargs("source_software", "source_software_version", kwargs) + source_software, source_software_version = popargs( + "source_software", "source_software_version", kwargs + ) super().__init__(**kwargs) self.pose_estimation_series = pose_estimation_series self.description = description @@ -266,12 +287,24 @@ def __init__(self, **kwargs): # TODO validate that the nodes correspond to the names of the pose estimation series objects # validate that len(original_videos) == len(labeled_videos) == len(dimensions) == len(cameras) - if original_videos is not None and (devices is None or len(original_videos) != len(devices)): - raise ValueError("The number of original videos should equal the number of camera devices.") - if labeled_videos is not None and (devices is None or len(labeled_videos) != len(devices)): - raise ValueError("The number of labeled videos should equal the number of camera devices.") - if dimensions is not None and (devices is None or len(dimensions) != len(devices)): - raise ValueError("The number of dimensions should equal the number of camera devices.") + if original_videos is not None and ( + devices is None or len(original_videos) != len(devices) + ): + raise ValueError( + "The number of original videos should equal the number of camera devices." + ) + if labeled_videos is not None and ( + devices is None or len(labeled_videos) != len(devices) + ): + raise ValueError( + "The number of labeled videos should equal the number of camera devices." + ) + if dimensions is not None and ( + devices is None or len(dimensions) != len(devices) + ): + raise ValueError( + "The number of dimensions should equal the number of camera devices." + ) @property def nodes(self): @@ -279,7 +312,9 @@ def nodes(self): @nodes.setter def nodes(self, value): - raise ValueError("'nodes' is deprecated. Please use the 'skeleton' field instead.") + raise ValueError( + "'nodes' is deprecated. Please use the 'skeleton' field instead." + ) @property def edges(self): @@ -287,4 +322,6 @@ def edges(self): @edges.setter def edges(self, value): - raise ValueError("'edges' is deprecated. Please use the 'skeleton' field instead.") \ No newline at end of file + raise ValueError( + "'edges' is deprecated. Please use the 'skeleton' field instead." 
+ ) diff --git a/src/pynwb/ndx_pose/testing/mock/pose.py b/src/pynwb/ndx_pose/testing/mock/pose.py index 13d0bfc..2b8ebf6 100644 --- a/src/pynwb/ndx_pose/testing/mock/pose.py +++ b/src/pynwb/ndx_pose/testing/mock/pose.py @@ -6,7 +6,16 @@ from pynwb.testing.mock.utils import name_generator from pynwb.testing.mock.device import mock_Device -from ...pose import PoseEstimationSeries, Skeleton, PoseEstimation, SkeletonInstance, TrainingFrame, Skeletons, PoseTraining +from ...pose import ( + PoseEstimationSeries, + Skeleton, + PoseEstimation, + SkeletonInstance, + SkeletonInstances, + TrainingFrame, + Skeletons, + PoseTraining, +) def mock_PoseEstimationSeries( @@ -91,7 +100,9 @@ def mock_PoseEstimation( NWBFile should be provided so that the skeleton can be added to the NWBFile in a PoseTraining object. """ skeleton = skeleton or mock_Skeleton() - pose_estimation_series = pose_estimation_series or [mock_PoseEstimationSeries(name=name) for name in skeleton.nodes] + pose_estimation_series = pose_estimation_series or [ + mock_PoseEstimationSeries(name=name) for name in skeleton.nodes + ] pe = PoseEstimation( pose_estimation_series=pose_estimation_series, description=description, @@ -110,7 +121,9 @@ def mock_PoseEstimation( pose_training = PoseTraining(skeletons=skeletons) if "behavior" not in nwbfile.processing: - behavior_pm = nwbfile.create_processing_module(name="behavior", description="processed behavioral data") + behavior_pm = nwbfile.create_processing_module( + name="behavior", description="processed behavioral data" + ) else: behavior_pm = nwbfile.processing["behavior"] behavior_pm.add(pe) @@ -118,8 +131,10 @@ def mock_PoseEstimation( return pe + def mock_SkeletonInstance( *, + name: Optional[str] = None, id: Optional[np.uint] = np.uint(10), node_locations: Optional[Any] = None, node_visibility: list = None, @@ -138,18 +153,35 @@ def mock_SkeletonInstance( edges=np.array([[0, 1]], dtype="uint8"), ) if node_locations is None: - node_locations = np.arange(num_nodes * 2, dtype=np.float64).reshape((num_nodes, 2)) + node_locations = np.arange(num_nodes * 2, dtype=np.float64).reshape( + (num_nodes, 2) + ) + + if name is None: + name = skeleton.name + "_instance_" + str(id) if node_visibility is None: node_visibility = np.ones(num_nodes, dtype="bool") skeleton_instance = SkeletonInstance( + name=name, id=id, node_locations=node_locations, node_visibility=node_visibility, skeleton=skeleton, ) + return skeleton_instance +def mock_SkeletonInstances(skeleton_instances=None): + if skeleton_instances is None: + skeleton_instances = [mock_SkeletonInstance()] + if not isinstance(skeleton_instances, list): + skeleton_instances = [skeleton_instances] + return SkeletonInstances( + skeleton_instances=skeleton_instances, + ) + + def mock_source_video( *, name: Optional[str] = None, @@ -172,11 +204,18 @@ def mock_source_frame( ): return RGBImage(name=name, data=np.random.rand(640, 480, 3).astype("uint8")) +def mock_source_frame( + *, + name: Optional[str] = None, +): + return RGBImage(name=name, data=np.random.rand(640, 480, 3).astype("uint8")) + + def mock_TrainingFrame( *, name: Optional[str] = None, annotator: Optional[str] = "Awesome Possum", - skeleton_instance: SkeletonInstance = None, + skeleton_instances: SkeletonInstances = None, source_video: ImageSeries = None, source_frame: Image = None, source_video_frame_index: np.uint = np.uint(10), @@ -184,8 +223,9 @@ def mock_TrainingFrame( training_frame = TrainingFrame( name=name or name_generator("TrainingFrame"), annotator=annotator, - 
skeleton_instance=skeleton_instance or mock_SkeletonInstance(), - source_video=source_video or (mock_source_video() if source_frame is None else None), + skeleton_instances=skeleton_instances or mock_SkeletonInstances(), + source_video=source_video + or (mock_source_video() if source_frame is None else None), source_frame=source_frame, source_video_frame_index=source_video_frame_index, ) diff --git a/src/pynwb/tests/integration/hdf5/test_pose.py b/src/pynwb/tests/integration/hdf5/test_pose.py index e21d511..7840814 100644 --- a/src/pynwb/tests/integration/hdf5/test_pose.py +++ b/src/pynwb/tests/integration/hdf5/test_pose.py @@ -4,8 +4,25 @@ from pynwb import NWBHDF5IO, NWBFile from pynwb.testing import TestCase, remove_test_file, NWBH5IOFlexMixin -from ndx_pose import PoseEstimationSeries, PoseEstimation, PoseTraining, Skeletons, SourceVideos, TrainingFrames -from ndx_pose.testing.mock.pose import mock_PoseEstimationSeries, mock_Skeleton, mock_PoseEstimation, mock_SkeletonInstance, mock_TrainingFrame, mock_source_video +from ndx_pose import ( + PoseEstimationSeries, + PoseEstimation, + PoseTraining, + Skeletons, + SourceVideos, + TrainingFrames, + SkeletonInstance, + SkeletonInstances, +) +from ndx_pose.testing.mock.pose import ( + mock_PoseEstimationSeries, + mock_Skeleton, + mock_PoseEstimation, + mock_SkeletonInstance, + mock_SkeletonInstances, + mock_TrainingFrame, + mock_source_video, +) class TestPoseEstimationSeriesRoundtrip(TestCase): @@ -41,7 +58,9 @@ def test_roundtrip(self): ) # ideally the PoseEstimationSeries is added to a PoseEstimation object but here, test just the series - behavior_pm = self.nwbfile.create_processing_module(name="behavior", description="processed behavioral data") + behavior_pm = self.nwbfile.create_processing_module( + name="behavior", description="processed behavioral data" + ) behavior_pm.add(pes) with NWBHDF5IO(self.path, mode="w") as io: @@ -49,7 +68,9 @@ def test_roundtrip(self): with NWBHDF5IO(self.path, mode="r", load_namespaces=True) as io: read_nwbfile = io.read() - self.assertContainerEqual(pes, read_nwbfile.processing["behavior"]["front_left_paw"]) + self.assertContainerEqual( + pes, read_nwbfile.processing["behavior"]["front_left_paw"] + ) def test_roundtrip_link_timestamps(self): """ @@ -83,7 +104,9 @@ def test_roundtrip_link_timestamps(self): ) # ideally the PoseEstimationSeries is added to a PoseEstiamtion object but here, test just the series - behavior_pm = self.nwbfile.create_processing_module(name="behavior", description="processed behavioral data") + behavior_pm = self.nwbfile.create_processing_module( + name="behavior", description="processed behavioral data" + ) behavior_pm.add(front_left_paw) behavior_pm.add(front_right_paw) @@ -92,8 +115,12 @@ def test_roundtrip_link_timestamps(self): with NWBHDF5IO(self.path, mode="r", load_namespaces=True) as io: read_nwbfile = io.read() - self.assertContainerEqual(front_left_paw, read_nwbfile.processing["behavior"]["front_left_paw"]) - self.assertContainerEqual(front_right_paw, read_nwbfile.processing["behavior"]["front_right_paw"]) + self.assertContainerEqual( + front_left_paw, read_nwbfile.processing["behavior"]["front_left_paw"] + ) + self.assertContainerEqual( + front_right_paw, read_nwbfile.processing["behavior"]["front_right_paw"] + ) self.assertIs( read_nwbfile.processing["behavior"]["front_left_paw"].timestamps, read_nwbfile.processing["behavior"]["front_right_paw"].timestamps, @@ -107,10 +134,12 @@ def getContainerType(self): return "PoseEstimationSeries" def addContainer(self): - 
"""Add the test PoseEstimationSeries to the given NWBFile """ + """Add the test PoseEstimationSeries to the given NWBFile""" pes = mock_PoseEstimationSeries(name="test_PES") - behavior_pm = self.nwbfile.create_processing_module(name="behavior", description="processed behavioral data") + behavior_pm = self.nwbfile.create_processing_module( + name="behavior", description="processed behavioral data" + ) behavior_pm.add(pes) def getContainer(self, nwbfile: NWBFile): @@ -138,7 +167,9 @@ def test_roundtrip(self): Add a PoseEstimation to an NWBFile, write it, read it, and test that the read object matches the original. """ skeleton = mock_Skeleton() - pose_estimation_series = [mock_PoseEstimationSeries(name=name) for name in skeleton.nodes] + pose_estimation_series = [ + mock_PoseEstimationSeries(name=name) for name in skeleton.nodes + ] pe = PoseEstimation( pose_estimation_series=pose_estimation_series, description="Estimated positions of front paws using DeepLabCut.", @@ -155,7 +186,9 @@ def test_roundtrip(self): skeletons = Skeletons(skeletons=[skeleton]) pose_training = PoseTraining(skeletons=skeletons) - behavior_pm = self.nwbfile.create_processing_module(name="behavior", description="processed behavioral data") + behavior_pm = self.nwbfile.create_processing_module( + name="behavior", description="processed behavioral data" + ) behavior_pm.add(pe) behavior_pm.add(pose_training) @@ -167,12 +200,20 @@ def test_roundtrip(self): read_pe = read_nwbfile.processing["behavior"]["PoseEstimation"] self.assertContainerEqual(read_pe, pe) self.assertEqual(len(read_pe.pose_estimation_series), 3) - self.assertContainerEqual(read_pe.pose_estimation_series["node1"], pose_estimation_series[0]) - self.assertContainerEqual(read_pe.pose_estimation_series["node2"], pose_estimation_series[1]) - self.assertContainerEqual(read_pe.pose_estimation_series["node3"], pose_estimation_series[2]) + self.assertContainerEqual( + read_pe.pose_estimation_series["node1"], pose_estimation_series[0] + ) + self.assertContainerEqual( + read_pe.pose_estimation_series["node2"], pose_estimation_series[1] + ) + self.assertContainerEqual( + read_pe.pose_estimation_series["node3"], pose_estimation_series[2] + ) self.assertContainerEqual(read_pe.skeleton, skeleton) self.assertEqual(len(read_pe.devices), 1) - self.assertContainerEqual(read_pe.devices[0], self.nwbfile.devices["camera1"]) + self.assertContainerEqual( + read_pe.devices[0], self.nwbfile.devices["camera1"] + ) class TestPoseEstimationRoundtripPyNWB(NWBH5IOFlexMixin, TestCase): @@ -182,7 +223,7 @@ def getContainerType(self): return "PoseEstimation" def addContainer(self): - """Add the test PoseEstimation to the given NWBFile """ + """Add the test PoseEstimation to the given NWBFile""" mock_PoseEstimation(nwbfile=self.nwbfile) def getContainer(self, nwbfile: NWBFile): @@ -196,27 +237,38 @@ def getContainerType(self): return "PoseTraining" def addContainer(self): - """Add the test PoseTraining to the given NWBFile """ + """Add the test PoseTraining to the given NWBFile""" skeleton1 = mock_Skeleton(name="subject1") skeleton2 = mock_Skeleton(name="subject2") source_video = mock_source_video(name="source_video") sk1_instance10 = mock_SkeletonInstance(id=np.uint(10), skeleton=skeleton1) + sk1_instance11 = mock_SkeletonInstance(id=np.uint(11), skeleton=skeleton1) + sk1_instances = mock_SkeletonInstances( + skeleton_instances=[sk1_instance10, sk1_instance11] + ) sk1_training_frame = mock_TrainingFrame( name="skeleton1_frame10", - skeleton_instance=sk1_instance10, + 
skeleton_instances=sk1_instances, source_video=source_video, source_video_frame_index=np.uint(10), ) sk2_instance10 = mock_SkeletonInstance(id=np.uint(10), skeleton=skeleton2) + sk2_instance11 = mock_SkeletonInstance(id=np.uint(11), skeleton=skeleton2) + sk2_instance12 = mock_SkeletonInstance(id=np.uint(12), skeleton=skeleton2) + sk2_instances = mock_SkeletonInstances( + skeleton_instances=[sk2_instance10, sk2_instance11, sk2_instance12] + ) sk2_training_frame = mock_TrainingFrame( name="skeleton2_frame10", - skeleton_instance=sk2_instance10, + skeleton_instances=sk2_instances, source_video=source_video, source_video_frame_index=np.uint(10), ) skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) - training_frames = TrainingFrames(training_frames=[sk1_training_frame, sk2_training_frame]) + training_frames = TrainingFrames( + training_frames=[sk1_training_frame, sk2_training_frame] + ) source_videos = SourceVideos(image_series=[source_video]) pose_training = PoseTraining( diff --git a/src/pynwb/tests/test_example_usage.py b/src/pynwb/tests/test_example_usage.py index 3d729b6..0863797 100644 --- a/src/pynwb/tests/test_example_usage.py +++ b/src/pynwb/tests/test_example_usage.py @@ -1,4 +1,6 @@ """An example of how to use the ndx-pose extension, packaged as a test so that it is run by pytest.""" + + def test_example_usage(): import datetime import numpy as np @@ -13,6 +15,7 @@ def test_example_usage(): Skeletons, TrainingFrames, SourceVideos, + SkeletonInstances, ) from pynwb.image import ImageSeries @@ -97,7 +100,9 @@ def test_example_usage(): description="Estimated positions of front paws using DeepLabCut.", original_videos=["path/to/camera1.mp4"], labeled_videos=["path/to/camera1_labeled.mp4"], - dimensions=np.array([[640, 480]], dtype="uint16"), # pixel dimensions of the video + dimensions=np.array( + [[640, 480]], dtype="uint16" + ), # pixel dimensions of the video devices=[camera1], scorer="DLC_resnet50_openfieldOct30shuffle1_1600", source_software="DeepLabCut", @@ -135,11 +140,15 @@ def test_example_usage(): ] ) - skeleton_instances = [] + # create 50 training frames using the training video and the skeleton instances. + # the skeleton instances start with video frame 0 and end with video frame 49. + training_frames_list = [] for i in range(50): + skeleton_instances_list = [] # add some noise to the node locations from the location on the previous frame node_locations = node_locations + np.random.rand(3, 2) - instance = SkeletonInstance( + instance_1 = SkeletonInstance( + name=f"skeleton1_instance{i}", id=np.uint(i), node_locations=node_locations, node_visibility=[ @@ -149,17 +158,33 @@ def test_example_usage(): ], skeleton=skeleton, # link to the skeleton ) - skeleton_instances.append(instance) + skeleton_instances_list.append(instance_1) + + # add some noise to the node locations from the location on the previous frame + node_locations = node_locations + np.random.rand(3, 2) + instance_2 = SkeletonInstance( + name=f"skeleton2_instance{i}", + id=np.uint(i), + node_locations=node_locations, + node_visibility=[ + True, # front_left_paw + True, # body + True, # front_right_paw + ], + skeleton=skeleton, # link to the skeleton + ) + skeleton_instances_list.append(instance_2) + + # store the skeleton instances in a SkeletonInstances object + skeleton_instances = SkeletonInstances( + skeleton_instances=skeleton_instances_list + ) - # create 50 training frames using the training video and the skeleton instances. 
- # the skeleton instances start with video frame 0 and end with video frame 49. - training_frames_list = [] - for i in range(50): # names must be unique within a PoseTraining object (we will add them to a PoseTraining object below) training_frame = TrainingFrame( name="frame_{}".format(i), annotator="Bilbo Baggins", - skeleton_instance=skeleton_instances[i], + skeleton_instances=skeleton_instances, source_video=training_video1, source_video_frame_index=np.uint(i), ) diff --git a/src/pynwb/tests/unit/test_pose.py b/src/pynwb/tests/unit/test_pose.py index ef34618..2082527 100644 --- a/src/pynwb/tests/unit/test_pose.py +++ b/src/pynwb/tests/unit/test_pose.py @@ -12,6 +12,7 @@ PoseEstimation, TrainingFrame, SkeletonInstance, + SkeletonInstances, PoseTraining, Skeletons, TrainingFrames, @@ -19,6 +20,7 @@ ) from ndx_pose.testing.mock.pose import ( mock_PoseEstimationSeries, + mock_SkeletonInstances, mock_source_video, mock_source_frame, mock_Skeleton, @@ -46,13 +48,17 @@ def test_constructor(self): ) self.assertEqual(pes.name, "front_left_paw") - self.assertEqual(pes.description, "Marker placed around fingers of front left paw.") + self.assertEqual( + pes.description, "Marker placed around fingers of front left paw." + ) np.testing.assert_array_equal(pes.data, data) self.assertEqual(pes.unit, "pixels") self.assertEqual(pes.reference_frame, "(0,0,0) corresponds to ...") np.testing.assert_array_equal(pes.timestamps, timestamps) np.testing.assert_array_equal(pes.confidence, confidence) - self.assertEqual(pes.confidence_definition, "Softmax output of the deep neural network.") + self.assertEqual( + pes.confidence_definition, "Softmax output of the deep neural network." + ) class TestSkeleton(TestCase): @@ -66,7 +72,9 @@ def test_init(self): ) self.assertEqual(skeleton.name, "subject1") self.assertEqual(skeleton.nodes, ["front_left_paw", "body", "front_right_paw"]) - np.testing.assert_array_equal(skeleton.edges, np.array([[0, 1], [1, 2]], dtype="uint8")) + np.testing.assert_array_equal( + skeleton.edges, np.array([[0, 1], [1, 2]], dtype="uint8") + ) class TestPoseEstimationConstructor(TestCase): @@ -117,13 +125,23 @@ def test_constructor(self): self.assertEqual(pe.name, "PoseEstimation") self.assertEqual(len(pe.pose_estimation_series), 3) - self.assertIs(pe.pose_estimation_series["front_left_paw"], pose_estimation_series[0]) + self.assertIs( + pe.pose_estimation_series["front_left_paw"], pose_estimation_series[0] + ) self.assertIs(pe.pose_estimation_series["body"], pose_estimation_series[1]) - self.assertIs(pe.pose_estimation_series["front_right_paw"], pose_estimation_series[2]) - self.assertEqual(pe.description, "Estimated positions of front paws using DeepLabCut.") + self.assertIs( + pe.pose_estimation_series["front_right_paw"], pose_estimation_series[2] + ) + self.assertEqual( + pe.description, "Estimated positions of front paws using DeepLabCut." 
+ ) self.assertEqual(pe.original_videos, ["camera1.mp4", "camera2.mp4"]) - self.assertEqual(pe.labeled_videos, ["camera1_labeled.mp4", "camera2_labeled.mp4"]) - np.testing.assert_array_equal(pe.dimensions, np.array([[640, 480], [1024, 768]], dtype="uint16")) + self.assertEqual( + pe.labeled_videos, ["camera1_labeled.mp4", "camera2_labeled.mp4"] + ) + np.testing.assert_array_equal( + pe.dimensions, np.array([[640, 480], [1024, 768]], dtype="uint16") + ) self.assertEqual(len(pe.devices), 2) self.assertIs(pe.devices[0], self.nwbfile.devices["camera1"]) self.assertIs(pe.devices[1], self.nwbfile.devices["camera2"]) @@ -192,7 +210,10 @@ def test_constructor_nodes_edges(self): original_videos=["camera1.mp4", "camera2.mp4"], labeled_videos=["camera1_labeled.mp4", "camera2_labeled.mp4"], dimensions=np.array([[640, 480], [1024, 768]], dtype="uint16"), - devices=[self.nwbfile.devices["camera1"], self.nwbfile.devices["camera2"]], + devices=[ + self.nwbfile.devices["camera1"], + self.nwbfile.devices["camera2"], + ], scorer="DLC_resnet50_openfieldOct30shuffle1_1600", source_software="DeepLabCut", source_software_version="2.2b8", @@ -200,7 +221,9 @@ def test_constructor_nodes_edges(self): edges=np.array([[0, 1], [1, 2]], dtype="uint8"), ) self.assertEqual(pe.nodes, ["front_left_paw", "body", "front_right_paw"]) - np.testing.assert_array_equal(pe.edges, np.array([[0, 1], [1, 2]], dtype="uint8")) + np.testing.assert_array_equal( + pe.edges, np.array([[0, 1], [1, 2]], dtype="uint8") + ) skeleton = Skeleton( name="subject", nodes=["front_left_paw", "body", "front_right_paw"], @@ -241,61 +264,73 @@ def test_constructor(self): class TestTrainingFrame(TestCase): def test_constructor(self): - skeleton_instance = mock_SkeletonInstance() + skeleton_instances = mock_SkeletonInstances() source_video = mock_source_video(name="source_video") training_frame = TrainingFrame( name="frame0", annotator="Awesome Possum", - skeleton_instance=skeleton_instance, + skeleton_instances=skeleton_instances, source_video=source_video, source_video_frame_index=np.uint(0), ) self.assertEqual(training_frame.name, "frame0") self.assertEqual(training_frame.annotator, "Awesome Possum") - self.assertIs(training_frame.skeleton_instance, skeleton_instance) + self.assertIs(training_frame.skeleton_instances, skeleton_instances) self.assertIs(training_frame.source_video, source_video) self.assertEqual(training_frame.source_video_frame_index, np.uint(0)) class TestTrainingFrameImage(TestCase): def test_constructor(self): - skeleton_instance = mock_SkeletonInstance() + skeleton_instances = mock_SkeletonInstances() source_frame = mock_source_frame(name="frame0_image") training_frame = TrainingFrame( name="frame0", annotator="Awesome Possum", - skeleton_instance=skeleton_instance, + skeleton_instances=skeleton_instances, source_frame=source_frame, source_video_frame_index=np.uint(0), ) self.assertEqual(training_frame.name, "frame0") self.assertEqual(training_frame.annotator, "Awesome Possum") - self.assertIs(training_frame.skeleton_instance, skeleton_instance) + self.assertIs(training_frame.skeleton_instances, skeleton_instances) self.assertIs(training_frame.source_frame, source_frame) self.assertEqual(training_frame.source_video_frame_index, np.uint(0)) + class TestPoseTraining(TestCase): def test_constructor(self): skeleton1 = mock_Skeleton(name="subject1") skeleton2 = mock_Skeleton(name="subject2") source_video = mock_source_video(name="source_video") sk1_instance10 = mock_SkeletonInstance(id=np.uint(10), skeleton=skeleton1) + sk1_instance11 = 
mock_SkeletonInstance(id=np.uint(11), skeleton=skeleton1) + sk1_instances = mock_SkeletonInstances( + skeleton_instances=[sk1_instance10, sk1_instance11] + ) sk1_training_frame = mock_TrainingFrame( name="skeleton1_frame10", - skeleton_instance=sk1_instance10, + skeleton_instances=sk1_instances, source_video=source_video, source_video_frame_index=np.uint(10), ) sk2_instance10 = mock_SkeletonInstance(id=np.uint(10), skeleton=skeleton2) + sk2_instance11 = mock_SkeletonInstance(id=np.uint(11), skeleton=skeleton2) + sk2_instance12 = mock_SkeletonInstance(id=np.uint(12), skeleton=skeleton2) + sk2_instances = mock_SkeletonInstances( + skeleton_instances=[sk2_instance10, sk2_instance11, sk2_instance12] + ) sk2_training_frame = mock_TrainingFrame( name="skeleton2_frame10", - skeleton_instance=sk2_instance10, + skeleton_instances=sk2_instances, source_video=source_video, source_video_frame_index=np.uint(10), ) skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) - training_frames = TrainingFrames(training_frames=[sk1_training_frame, sk2_training_frame]) + training_frames = TrainingFrames( + training_frames=[sk1_training_frame, sk2_training_frame] + ) source_videos = SourceVideos(image_series=[source_video]) pose_training = PoseTraining( @@ -307,10 +342,18 @@ def test_constructor(self): self.assertIs(pose_training.skeletons.skeletons["subject1"], skeleton1) self.assertIs(pose_training.skeletons.skeletons["subject2"], skeleton2) self.assertEqual(len(pose_training.training_frames.training_frames), 2) - self.assertIs(pose_training.training_frames.training_frames["skeleton1_frame10"], sk1_training_frame) - self.assertIs(pose_training.training_frames.training_frames["skeleton2_frame10"], sk2_training_frame) + self.assertIs( + pose_training.training_frames.training_frames["skeleton1_frame10"], + sk1_training_frame, + ) + self.assertIs( + pose_training.training_frames.training_frames["skeleton2_frame10"], + sk2_training_frame, + ) self.assertEqual(len(pose_training.source_videos.image_series), 1) - self.assertIs(pose_training.source_videos.image_series["source_video"], source_video) + self.assertIs( + pose_training.source_videos.image_series["source_video"], source_video + ) class TestPoseTrainingImages(TestCase): @@ -319,24 +362,36 @@ def test_constructor(self): skeleton2 = mock_Skeleton(name="subject2") source_frame_10 = mock_source_frame(name="source_frame_10") sk1_instance10 = mock_SkeletonInstance(id=np.uint(10), skeleton=skeleton1) + sk1_instance11 = mock_SkeletonInstance(id=np.uint(11), skeleton=skeleton1) + sk1_instances = mock_SkeletonInstances( + skeleton_instances=[sk1_instance10, sk1_instance11] + ) sk1_training_frame = mock_TrainingFrame( name="frame10", - skeleton_instance=sk1_instance10, + skeleton_instances=sk1_instances, source_frame=source_frame_10, source_video_frame_index=np.uint(10), ) - + source_frame_11 = mock_source_frame(name="source_frame_11") + + sk2_instance10 = mock_SkeletonInstance(id=np.uint(10), skeleton=skeleton2) sk2_instance11 = mock_SkeletonInstance(id=np.uint(11), skeleton=skeleton2) + sk2_instance12 = mock_SkeletonInstance(id=np.uint(12), skeleton=skeleton2) + sk2_instances = mock_SkeletonInstances( + skeleton_instances=[sk2_instance10, sk2_instance11, sk2_instance12] + ) sk2_training_frame = mock_TrainingFrame( name="frame11", - skeleton_instance=sk2_instance11, + skeleton_instances=sk2_instances, source_frame=source_frame_11, source_video_frame_index=np.uint(11), ) skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) - training_frames = 
TrainingFrames(training_frames=[sk1_training_frame, sk2_training_frame]) + training_frames = TrainingFrames( + training_frames=[sk1_training_frame, sk2_training_frame] + ) pose_training = PoseTraining( skeletons=skeletons, @@ -346,6 +401,10 @@ def test_constructor(self): self.assertIs(pose_training.skeletons.skeletons["subject1"], skeleton1) self.assertIs(pose_training.skeletons.skeletons["subject2"], skeleton2) self.assertEqual(len(pose_training.training_frames.training_frames), 2) - self.assertIs(pose_training.training_frames.training_frames["frame10"], sk1_training_frame) - self.assertIs(pose_training.training_frames.training_frames["frame11"], sk2_training_frame) - self.assertIsNone(pose_training.source_videos) \ No newline at end of file + self.assertIs( + pose_training.training_frames.training_frames["frame10"], sk1_training_frame + ) + self.assertIs( + pose_training.training_frames.training_frames["frame11"], sk2_training_frame + ) + self.assertIsNone(pose_training.source_videos) diff --git a/src/spec/create_extension_spec.py b/src/spec/create_extension_spec.py index 0b3ac0c..10e1bf5 100644 --- a/src/spec/create_extension_spec.py +++ b/src/spec/create_extension_spec.py @@ -215,7 +215,6 @@ def main(): neurodata_type_inc="NWBDataInterface", doc=( "Group that holds ground-truth pose data for a single instance of a skeleton in a single frame. " - "This is meant to be used within a TrainingFrame." ), default_name="skeleton_instance", links=[ @@ -235,7 +234,9 @@ def main(): datasets=[ NWBDatasetSpec( name="node_locations", - doc=("Locations (x, y) or (x, y, z) of nodes for single instance in single frame."), + doc=( + "Locations (x, y) or (x, y, z) of nodes for single instance in single frame." + ), dtype="float", dims=[["num_body_parts", "x, y"], ["num_body_parts", "x, y, z"]], shape=[[None, 2], [None, 3]], @@ -255,6 +256,23 @@ def main(): ], ) + skeleton_instances = NWBGroupSpec( + neurodata_type_def="SkeletonInstances", + neurodata_type_inc="NWBDataInterface", + doc=( + "Organizational group to hold skeleton instances." + "This is meant to be used within a TrainingFrame." + ), + default_name="skeleton_instances", + groups=[ + NWBGroupSpec( + neurodata_type_inc="SkeletonInstance", + doc="Ground-truth position data for a single instance of a skeleton in a single training frame.", + quantity="*", + ), + ], + ) + source_videos = NWBGroupSpec( neurodata_type_def="SourceVideos", neurodata_type_inc="NWBDataInterface", @@ -277,9 +295,9 @@ def main(): default_name="TrainingFrame", groups=[ NWBGroupSpec( - name="skeleton_instance", - neurodata_type_inc="SkeletonInstance", - doc="Position data for a single instance of a skeleton in a single training frame.", + name="skeleton_instances", + neurodata_type_inc="SkeletonInstances", + doc="Position data for all instances of a skeleton in a single training frame.", ), ], attributes=[ @@ -371,15 +389,20 @@ def main(): training_frame, skeleton_instance, training_frames, + skeleton_instances, source_videos, skeletons, pose_training, ] # export the spec to yaml files in the spec folder - output_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "spec")) + output_dir = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "..", "spec") + ) export_spec(ns_builder, new_data_types, output_dir) - print("Spec files generated. Please make sure to rerun `pip install .` to load the changes.") + print( + "Spec files generated. Please make sure to rerun `pip install .` to load the changes." 
+ ) if __name__ == "__main__": From 5898ce8372fc1ec1b9ca84ab6085e8e06cc269a1 Mon Sep 17 00:00:00 2001 From: rly Date: Wed, 13 Mar 2024 16:37:20 -0700 Subject: [PATCH 3/6] Update authors --- README.md | 2 ++ spec/ndx-pose.namespace.yaml | 6 ++++++ src/spec/create_extension_spec.py | 6 ++++++ 3 files changed, 14 insertions(+) diff --git a/README.md b/README.md index 3faf067..09426ba 100644 --- a/README.md +++ b/README.md @@ -317,5 +317,7 @@ also use ndx-pose. - @roomrys - @CBroz1 - @h-mayorquin +- @talmo +- @eberrigan This extension was created using [ndx-template](https://github.com/nwb-extensions/ndx-template). diff --git a/spec/ndx-pose.namespace.yaml b/spec/ndx-pose.namespace.yaml index 79b522c..5ef8ffe 100644 --- a/spec/ndx-pose.namespace.yaml +++ b/spec/ndx-pose.namespace.yaml @@ -5,12 +5,18 @@ namespaces: - Alexander Mathis - Liezl Maree - Chris Brozdowski + - Heberto Mayorquin + - Talmo Pereira + - Elizabeth Berrigan contact: - rly@lbl.gov - bdichter@lbl.gov - alexander.mathis@epfl.ch - lmaree@salk.edu - cbroz@datajoint.com + - h.mayorquin@gmail.com + - talmo@salk.edu + - eberrigan@salk.edu doc: NWB extension to store pose estimation data name: ndx-pose schema: diff --git a/src/spec/create_extension_spec.py b/src/spec/create_extension_spec.py index 10e1bf5..37417b3 100644 --- a/src/spec/create_extension_spec.py +++ b/src/spec/create_extension_spec.py @@ -23,6 +23,9 @@ def main(): "Alexander Mathis", "Liezl Maree", "Chris Brozdowski", + "Heberto Mayorquin", + "Talmo Pereira", + "Elizabeth Berrigan", ], contact=[ "rly@lbl.gov", @@ -30,6 +33,9 @@ def main(): "alexander.mathis@epfl.ch", "lmaree@salk.edu", "cbroz@datajoint.com", + "h.mayorquin@gmail.com", + "talmo@salk.edu", + "eberrigan@salk.edu", ], ) From aa26b3f1238f3487e8771ba8b3012763da0c615e Mon Sep 17 00:00:00 2001 From: rly Date: Wed, 13 Mar 2024 16:37:52 -0700 Subject: [PATCH 4/6] Update requirements to get latest bug fixes --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index e416fa5..27e7f4d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ # pinned dependencies to reproduce a working development environment -hdmf==3.11.0 -pynwb==2.5.0 +hdmf==3.12.2 +pynwb==2.6.0 From fc6aaa3d8d105de6e350e4ba4650ed47c8f888db Mon Sep 17 00:00:00 2001 From: rly Date: Wed, 13 Mar 2024 16:54:27 -0700 Subject: [PATCH 5/6] Store Skeletons in behavior processing module, update docs&tests --- CHANGELOG.md | 9 +- README.md | 358 +++++++++++++----- spec/ndx-pose.extensions.yaml | 13 +- src/pynwb/ndx_pose/testing/mock/pose.py | 4 +- src/pynwb/tests/integration/hdf5/test_pose.py | 12 +- src/pynwb/tests/test_example_usage.py | 266 ++++++++++--- src/pynwb/tests/unit/test_pose.py | 24 +- src/spec/create_extension_spec.py | 17 +- 8 files changed, 522 insertions(+), 181 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b15f43c..0dbd9a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,8 +3,13 @@ ## ndx-pose 0.2.0 (Upcoming) ### Breaking changes -- Removed "nodes" and "edges" fields from `PoseEstimation` neurodata type. Create a `Skeleton` object and pass - it to the `skeleton` keyword argument of `PoseEstimation.__init__` instead. @rly (#7) +- Removed the `nodes` and `edges` fields from `PoseEstimation` neurodata type. To specify these, + create a `Skeleton` object with those values, create a `Skeletons` object and pass the `Skeleton` + object to that, and add the `Skeletons` object to your "behavior" processing module. 
@rly (#7, #24) + +### Major changes +- Added support for storing training data in the new `PoseTraining` neurodata type and other new types. + @roomrys, @CBroz1, @rly, @talmo, @eberrigan (#7, #21, #24) ### Minor changes - Made `PoseEstimation.confidence` optional. @h-mayorquin (#11) diff --git a/README.md b/README.md index 09426ba..d950214 100644 --- a/README.md +++ b/README.md @@ -2,29 +2,28 @@ [![PyPI version](https://badge.fury.io/py/ndx-pose.svg)](https://badge.fury.io/py/ndx-pose) -ndx-pose is a standardized format for storing pose estimation data in NWB. It was developed initially to store the -output of [DeepLabCut](http://www.mackenziemathislab.org/deeplabcut) in NWB, but is also designed to store the output -of general pose estimation tools. Please post an issue or PR to suggest or add support for your favorite pose -estimation tool. +ndx-pose is a standardized format for storing pose estimation data in NWB, such as from +[DeepLabCut](http://www.mackenziemathislab.org/deeplabcut) and [SLEAP](https://sleap.ai/). +Please post an issue or PR to suggest or add support for another pose estimation tool. This extension consists of several new neurodata types: +- `Skeleton` which stores the relationship between the body parts (nodes and edges). +- `Skeletons` which stores multiple `Skeleton` objects. - `PoseEstimationSeries` which stores the estimated positions (x, y) or (x, y, z) of a body part over time as well as the confidence/likelihood of the estimated positions. - `PoseEstimation` which stores the estimated position data (`PoseEstimationSeries`) for multiple body parts, computed from the same video(s) with the same tool/algorithm. -- `Skeleton` which stores the relationship between the body parts (nodes and edges). - `SkeletonInstance` which stores the estimated positions and visibility of the body parts for a single frame. -- `TrainingFrame` which stores the ground truth data for a single frame. It references a `SkeletonInstance` and -frame of a source video (`ImageSeries`). The source videos can be stored internally as data arrays or externally as -files referenced by relative file path. -- `Skeletons` which stores multiple `Skeleton` objects. +- `TrainingFrame` which stores the ground truth data for a single frame. It contains `SkeletonInstance` objects and +references a frame of a source video (`ImageSeries`). The source videos can be stored internally as data arrays or +externally as files referenced by relative file path. - `TrainingFrames` which stores multiple `TrainingFrame` objects. - `SourceVideos` which stores multiple `ImageSeries` objects representing source videos used in training. -- `PoseTraining` which stores the skeletons (`Skeletons`), ground truth data (`TrainingFrames`), and -source videos (`SourceVideos`) used to train the pose estimation model. +- `PoseTraining` which stores the ground truth data (`TrainingFrames`) and source videos (`SourceVideos`) +used to train the pose estimation model. -If training data are not available, then the `PoseTraining` object should contain only the `Skeletons` object -which contains the `Skeleton` object(s) used to generate the pose estimates. +It is recommended to place the `Skeletons`, `PoseEstimation`, and `PoseTraining` objects in an NWB processing module +named "behavior", as shown below. ## Installation @@ -32,25 +31,20 @@ which contains the `Skeleton` object(s) used to generate the pose estimates. 
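ndx-pose is distributed on PyPI (see the badge above), so the usual route is `pip install ndx-pose`
into the same environment as PyNWB; pinned development dependencies live in `requirements.txt`.
A minimal smoke test of the install, using only names this package is known to export:

```python
# importing ndx_pose loads the ndx-pose namespace into PyNWB and exposes its types
from ndx_pose import PoseEstimation, PoseEstimationSeries, Skeleton, Skeletons

print(PoseEstimation.__name__, PoseEstimationSeries.__name__)
```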
## Usage -### Example storing pose estimates (keypoints) and training data -With one camera, one video, three body parts, and 50 training frames. +### Example storing pose estimates (keypoints) +With one camera, one video, one skeleton, and three body parts per skeleton. ```python import datetime import numpy as np from pynwb import NWBFile, NWBHDF5IO +from pynwb.file import Subject from ndx_pose import ( PoseEstimationSeries, PoseEstimation, Skeleton, - SkeletonInstance, - TrainingFrame, - PoseTraining, Skeletons, - TrainingFrames, - SourceVideos, ) -from pynwb.image import ImageSeries # initialize an NWBFile object nwbfile = NWBFile( @@ -59,6 +53,24 @@ nwbfile = NWBFile( session_start_time=datetime.datetime.now(datetime.timezone.utc), ) +# add a subject to the NWB file +subject = Subject(subject_id="subject1", species="Mus musculus") +nwbfile.subject = subject + +# create a skeleton that define the relationship between the markers. also link this skeleton to the subject. +skeleton = Skeleton( + name="subject1_skeleton", + nodes=["front_left_paw", "body", "front_right_paw"], + # define edges between nodes using the indices of the nodes in the node list. + # this array represents an edge between front left paw and body, and an edge between body and front right paw. + edges=np.array([[0, 1], [1, 2]], dtype="uint8"), + subject=subject, +) + +# store the skeleton into a Skeletons container object. +# (this is more useful if you have multiple skeletons in your training data) +skeletons = Skeletons(skeletons=[skeleton]) + # create a device for the camera camera1 = nwbfile.create_device( name="camera1", @@ -72,8 +84,6 @@ data = np.random.rand(100, 2) # num_frames x (x, y) but can be (x, y, z) timestamps = np.linspace(0, 10, num=100) # a timestamp for every frame confidence = np.random.rand(100) # a confidence value for every frame reference_frame = "(0,0,0) corresponds to ..." - -# note the double underscore in "confidence__definition" because this is a property of the "confidence" field confidence_definition = "Softmax output of the deep neural network." front_left_paw = PoseEstimationSeries( @@ -100,7 +110,7 @@ body = PoseEstimationSeries( confidence_definition=confidence_definition, ) -data = np.random.rand(100, 2) # num_frames x (x, y) but can be (x, y, z) +data = np.random.rand(100, 2) # num_frames x (x, y) but can be num_frames x (x, y, z) confidence = np.random.rand(100) # a confidence value for every frame front_right_paw = PoseEstimationSeries( name="front_right_paw", @@ -116,36 +126,184 @@ front_right_paw = PoseEstimationSeries( # store all PoseEstimationSeries in a list pose_estimation_series = [front_left_paw, body, front_right_paw] -# create a skeleton that defines the relationship between the markers -skeleton = Skeleton( - name="subject1", +# create a PoseEstimation object that represents the estimated positions of each node, references +# the original video and labeled video files, and provides metadata on how these estimates were generated. +# multiple videos and cameras can be referenced. 
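+# note: the devices listed below must already be added to this NWBFile
+# (the PoseEstimation constructor raises a ValueError otherwise), and the original
+# and labeled videos are referenced by file path rather than stored in the file.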
+pose_estimation = PoseEstimation( + name="PoseEstimation", + pose_estimation_series=pose_estimation_series, + description="Estimated positions of front paws of subject1 using DeepLabCut.", + original_videos=["path/to/camera1.mp4"], + labeled_videos=["path/to/camera1_labeled.mp4"], + dimensions=np.array( + [[640, 480]], dtype="uint16" + ), # pixel dimensions of the video + devices=[camera1], + scorer="DLC_resnet50_openfieldOct30shuffle1_1600", + source_software="DeepLabCut", + source_software_version="2.3.8", + skeleton=skeleton, # link to the skeleton object +) + +# create a "behavior" processing module to store the PoseEstimation and Skeletons objects +behavior_pm = nwbfile.create_processing_module( + name="behavior", + description="processed behavioral data", +) +behavior_pm.add(skeletons) +behavior_pm.add(pose_estimation) + +# write the NWBFile to disk +path = "test_pose.nwb" +with NWBHDF5IO(path, mode="w") as io: + io.write(nwbfile) + +# read the NWBFile from disk and print out the PoseEstimation and Skeleton objects +# as well as the first training frame +with NWBHDF5IO(path, mode="r") as io: + read_nwbfile = io.read() + print(read_nwbfile.processing["behavior"]["PoseEstimation"]) + print(read_nwbfile.processing["behavior"]["Skeletons"]["subject1_skeleton"]) +``` + +### Example storing pose estimates and training data (keypoints) +With one camera, one video, two skeletons (but only one pose estimate), three body parts per skeleton, +50 training frames with two skeleton instances per frame, and one source video. + +```python +import datetime +import numpy as np +from pynwb import NWBFile, NWBHDF5IO +from pynwb.file import Subject +from pynwb.image import ImageSeries +from ndx_pose import ( + PoseEstimationSeries, + PoseEstimation, + Skeleton, + SkeletonInstance, + TrainingFrame, + PoseTraining, + Skeletons, + TrainingFrames, + SourceVideos, + SkeletonInstances, +) + +# initialize an NWBFile object +nwbfile = NWBFile( + session_description="session_description", + identifier="identifier", + session_start_time=datetime.datetime.now(datetime.timezone.utc), +) + +# add a subject to the NWB file +subject = Subject(subject_id="subject1", species="Mus musculus") +nwbfile.subject = subject + +# in this example, we have two subjects in the training data and therefore two skeletons. +# each skeleton defines the relationship between the markers. +# Skeleton names must be unique because the Skeleton objects will be added to a Skeletons container object +# which requires unique names. +skeleton1 = Skeleton( + name="subject1_skeleton", + nodes=["front_left_paw", "body", "front_right_paw"], + # edge between front left paw and body, edge between body and front right paw. + # the values are the indices of the nodes in the nodes list. + edges=np.array([[0, 1], [1, 2]], dtype="uint8"), +) +skeleton2 = Skeleton( + name="subject2_skeleton", nodes=["front_left_paw", "body", "front_right_paw"], # edge between front left paw and body, edge between body and front right paw. # the values are the indices of the nodes in the nodes list. edges=np.array([[0, 1], [1, 2]], dtype="uint8"), ) -# create a PoseEstimation object that represents the estimated positions of the front paws and body -# from DLC and references the original video simultaneously recorded from one camera and the labeled -# video that was generated by DLC. multiple videos and cameras can be referenced. 
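# note: although two skeletons are defined above for the training data, this example
# computes pose estimates only for subject1, so only skeleton1 is linked below.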
+skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) + +# create a device for the camera +camera1 = nwbfile.create_device( + name="camera1", + description="camera for recording behavior", + manufacturer="my manufacturer", +) + +# a PoseEstimationSeries represents the estimated position of a single marker. +# in this example, we have three PoseEstimationSeries: one for the body and one for each front paw. +# a single NWB file contains pose estimation data for a single subject. if you have pose estimates for +# multiple subjects, store them in separate files. +data = np.random.rand(100, 2) # num_frames x (x, y) but can be num_frames x (x, y, z) +timestamps = np.linspace(0, 10, num=100) # a timestamp for every frame +confidence = np.random.rand(100) # a confidence value for every frame +reference_frame = "(0,0,0) corresponds to ..." +confidence_definition = "Softmax output of the deep neural network." + +front_left_paw = PoseEstimationSeries( + name="front_left_paw", + description="Marker placed around fingers of front left paw.", + data=data, + unit="pixels", + reference_frame=reference_frame, + timestamps=timestamps, + confidence=confidence, + confidence_definition=confidence_definition, +) + +data = np.random.rand(100, 2) # num_frames x (x, y) but can be (x, y, z) +confidence = np.random.rand(100) # a confidence value for every frame +body = PoseEstimationSeries( + name="body", + description="Marker placed on center of body.", + data=data, + unit="pixels", + reference_frame=reference_frame, + timestamps=front_left_paw, # link to timestamps of front_left_paw so we don't have to duplicate them + confidence=confidence, + confidence_definition=confidence_definition, +) + +data = np.random.rand(100, 2) # num_frames x (x, y) but can be (x, y, z) +confidence = np.random.rand(100) # a confidence value for every frame +front_right_paw = PoseEstimationSeries( + name="front_right_paw", + description="Marker placed around fingers of front right paw.", + data=data, + unit="pixels", + reference_frame=reference_frame, + timestamps=front_left_paw, # link to timestamps of front_left_paw so we don't have to duplicate them + confidence=confidence, + confidence_definition=confidence_definition, +) + +# store all PoseEstimationSeries in a list +pose_estimation_series = [front_left_paw, body, front_right_paw] + +# create a PoseEstimation object that represents the estimated positions of each node, references +# the original video and labeled video files, and provides metadata on how these estimates were generated. +# multiple videos and cameras can be referenced. pose_estimation = PoseEstimation( + name="PoseEstimation", pose_estimation_series=pose_estimation_series, - description="Estimated positions of front paws using DeepLabCut.", + description="Estimated positions of front paws of subject1 using DeepLabCut.", original_videos=["path/to/camera1.mp4"], labeled_videos=["path/to/camera1_labeled.mp4"], - dimensions=np.array([[640, 480]], dtype="uint16"), # pixel dimensions of the video + dimensions=np.array( + [[640, 480]], dtype="uint16" + ), # pixel dimensions of the video devices=[camera1], scorer="DLC_resnet50_openfieldOct30shuffle1_1600", source_software="DeepLabCut", source_software_version="2.3.8", - skeleton=skeleton, + skeleton=skeleton1, # link to the skeleton ) # next, we specify the ground truth data that was used to train the pose estimation model. # this includes the training video and the ground truth annotations for each frame. -# this is optional. 
if you don't have ground truth data, you can skip this step.
-# create an ImageSeries that represents the raw video that was used to train the pose estimation model
+# create an ImageSeries that represents the raw video that was used to train the pose estimation model.
+# the video can be stored as an MP4 file that is linked to from this ImageSeries object.
+# if there are multiple videos, the names must be unique because they will be added to a SourceVideos
+# container object which requires unique names.
 training_video1 = ImageSeries(
     name="source_video",
     description="Training video used to train the pose estimation model.",
@@ -157,58 +315,82 @@ training_video1 = ImageSeries(
     rate=30.0,
 )
 
-# create 50 ground truth instances of the skeleton at slightly random positions.
-# in this example, each node is visible on every frame.
-# the mapping of index in node_locations and node_visibilities to label is defined by the skeleton.
-
-# the node locations are the (x, y) coordinates of each node in the skeleton.
+# initial locations ((x, y) coordinates) of each node in the skeleton.
 # the order of the nodes is defined by the skeleton.
-node_locations = np.array(
+node_locations_sk1 = np.array(
     [
         [10.0, 10.0],  # front_left_paw
         [20.0, 20.0],  # body
         [30.0, 10.0],  # front_right_paw
     ]
 )
+node_locations_sk2 = np.array(
+    [
+        [40.0, 40.0],  # front_left_paw
+        [50.0, 50.0],  # body
+        [60.0, 60.0],  # front_right_paw
+    ]
+)
 
-skeleton_instances = []
-for i in range(50):
-    # add some noise to the node locations from the location on the previous frame
-    node_locations = node_locations + np.random.rand(3, 2)
-    instance = SkeletonInstance(
+# in this example, frame indices 0, 5, 10, ..., 495 from the training video were used for training.
+# each training frame has two skeleton instances, one for each skeleton.
+training_frames_list = []
+for i in range(0, 500, 5):
+    skeleton_instances_list = []
+    # add some noise to the node locations from the previous frame
+    node_locations_sk1 = node_locations_sk1 + np.random.rand(3, 2)
+    instance_sk1 = SkeletonInstance(
+        name="skeleton1_instance",
         id=np.uint(i),
-        node_locations=node_locations,
+        node_locations=node_locations_sk1,
         node_visibility=[
             True,  # front_left_paw
             True,  # body
             True,  # front_right_paw
         ],
-        skeleton=skeleton,  # link to the skeleton
+        skeleton=skeleton1,  # link to the skeleton
     )
-    skeleton_instances.append(instance)
+    skeleton_instances_list.append(instance_sk1)
 
-# create 50 training frames using the training video and the skeleton instances.
-# the skeleton instances start with video frame 0 and end with video frame 49.
-training_frames_list = []
-for i in range(50):
-    # names must be unique within a PoseTraining object (we will add them to a PoseTraining object below)
+    # add some noise to the node locations from the previous frame
+    node_locations_sk2 = node_locations_sk2 + np.random.rand(3, 2)
+    instance_sk2 = SkeletonInstance(
+        name="skeleton2_instance",
+        id=np.uint(i),
+        node_locations=node_locations_sk2,
+        node_visibility=[
+            True,  # front_left_paw
+            True,  # body
+            True,  # front_right_paw
+        ],
+        skeleton=skeleton2,  # link to the skeleton
+    )
+    skeleton_instances_list.append(instance_sk2)
+
+    # store the skeleton instances in a SkeletonInstances object
+    skeleton_instances = SkeletonInstances(
+        skeleton_instances=skeleton_instances_list
+    )
+
+    # TrainingFrame names must be unique because the TrainingFrame objects will be added to a
+    # TrainingFrames container object which requires unique names.
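+    # here, each name below is f"frame_{i}" with a distinct frame index i, so the names are unique.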
+    # the source video frame index is the index of the frame in the source video, which is useful
+    # for linking the training frames to the source video.
     training_frame = TrainingFrame(
-        name="frame_{}".format(i),
+        name=f"frame_{i}",
         annotator="Bilbo Baggins",
-        skeleton_instance=skeleton_instances[i],
+        skeleton_instances=skeleton_instances,
         source_video=training_video1,
         source_video_frame_index=np.uint(i),
     )
     training_frames_list.append(training_frame)
 
-# store the skeletons, training frames, and source videos in their corresponding grouping objects
-skeletons = Skeletons(skeletons=[skeleton])
+# store the training frames and source videos in their corresponding container objects
 training_frames = TrainingFrames(training_frames=training_frames_list)
 source_videos = SourceVideos(image_series=[training_video1])
 
 # store the skeletons group, training frames group, and source videos group in a PoseTraining object
 pose_training = PoseTraining(
-    skeletons=skeletons,
     training_frames=training_frames,
     source_videos=source_videos,
 )
@@ -218,6 +400,7 @@ behavior_pm = nwbfile.create_processing_module(
     name="behavior",
     description="processed behavioral data",
 )
+behavior_pm.add(skeletons)
 behavior_pm.add(pose_estimation)
 behavior_pm.add(pose_training)
 
@@ -228,21 +411,23 @@ with NWBHDF5IO(path, mode="w") as io:
     io.write(nwbfile)
 
 # read the NWBFile from disk and print out the PoseEstimation and PoseTraining objects
 # as well as the first training frame
-with NWBHDF5IO(path, mode="r", load_namespaces=True) as io:
+with NWBHDF5IO(path, mode="r") as io:
     read_nwbfile = io.read()
-    read_pe = read_nwbfile.processing["behavior"]["PoseEstimation"]
-    print(read_pe)
+    print(read_nwbfile.processing["behavior"]["PoseEstimation"])
+    print(read_nwbfile.processing["behavior"]["Skeletons"]["subject1_skeleton"])
     read_pt = read_nwbfile.processing["behavior"]["PoseTraining"]
-    print(read_pt)
-    print(read_pt.training_frames["frame_0"])
+    print(read_pt.training_frames["frame_10"].skeleton_instances["skeleton2_instance"].node_locations[:])
 ```
 
 ## Discussion
 
-1. Should we map `PoseEstimationSeries.confidence__definition` -> `PoseEstimationSeries.confidence_definition` and
-`PoseEstimation.source_software__version` -> `PoseEstimation.source_software_version` in the Python API?
-Note that the Matlab API uses a different format for accessing these fields.
+1. In the Python API only, the spec field `PoseEstimationSeries.confidence__definition` is mapped to the
+Python attribute `PoseEstimationSeries.confidence_definition`, and the spec field
+`PoseEstimation.source_software__version` is mapped to the Python attribute
+`PoseEstimation.source_software_version`.
+Note that the Matlab API uses a different format for accessing these fields. Should we maintain this mapping?
+
   - Pros:
     - Stays consistent with version 0.1.0-0.1.1
     - When ndx-pose is integrated into the NWB core, the con below will not be relevant, and we will probably
@@ -251,31 +436,22 @@ Note that the Matlab API uses a different format for accessing these fields.
   - Cons:
     - When reading data in Python, the code is different depending on whether the Python classes from ndx-pose
       are used or the classes are generated from the schema directly.
+    - PyNWB may eventually get rid of custom I/O mapping.
+
+2. If a user annotates 500 frames of a video, there will be 500 `TrainingFrame` groups in the NWB file.
+Should the node locations and visibilities instead be stored in a set of `PoseEstimationSeries`-like
+`TimeSeries` objects?
We could add API support for
+extracting a `SkeletonInstance` from a set of such objects and for creating such objects from a set of
+`TrainingFrame` objects. This would also make the storage pattern for keypoint data more consistent.
+
+## Handling pose estimates for multiple subjects
+
+NWB files are designed to store data from a single subject and have only one root-level `Subject` object.
+As a result, ndx-pose was designed to store pose estimates from a single subject.
+Pose estimation data from different subjects should be stored in separate NWB files.
 
-2. Should we create a typed group that would be contained within a `PoseTraining` object to contain `Skeleton` objects,
-e.g., a `Skeletons` group? This would be similar to the `Position` group containing `SpatialSeries` objects and a `BehavioralTimeSeries` group containing `TimeSeries` objects, except that unlike `SpatialSeries` and `TimeSeries`,
-`Skeleton` is not really a multi-purpose neurodata type. This makes the most sense for `SourceVideos`, because
-`ImageSeries` (i.e., a video), is pretty generic, and `SourceVideos` would tag the video as a source video for training
-and the auto-generated functions and variables in the parent `PoseTraining` object would use
-"source_videos" instead of "image_series".
-
-Similarly, should we create typed groups that would be contained within a `PoseTraining` object to contain
-`TrainingFrame` objects and `ImageSeries` objects? This type would be purely organizational.
-`NWBFile` has an untyped `acquisition` group and an untyped `processing` group for organization and tagging.
-A `ProcessingModule` is a typed group that exists solely for organization and tagging with custom names and
-descriptions.
-
-3. Currently, multiple `Skeleton` objects are allowed. Should they be allowed?
-If so, how do `Skeleton` objects relate to `Subject` objects?
-NWB files are designed to contain data from a single subject.
-Initially, ndx-pose was designed to store pose estimates from a single subject.
-Data from multiple subjects would be stored in separate NWB files.
-See https://github.com/rly/ndx-pose/pull/3
-
-4. If a user annotates 500 frames of a video, there will be 500 groups. Should the node locations and visibilities
-instead be stored in a set of PoseEstimationSeries-like TimeSeries objects? We can add in the API the ability to
-extract a SkeletonInstance from a set of those objects and create a set of those objects from a set of TrainingFrames.
-This would also make for a more consistent storage pattern for keypoint data.
+Training images, however, can involve multiple skeletons, and the same training images may be shared across
+subjects and therefore across NWB files. Until multi-subject support is added to NWB and ndx-pose, these
+images should simply be duplicated across files. See https://github.com/rly/ndx-pose/pull/3
 
 ## Resources
 
diff --git a/spec/ndx-pose.extensions.yaml b/spec/ndx-pose.extensions.yaml
index aec3636..dfd335b 100644
--- a/spec/ndx-pose.extensions.yaml
+++ b/spec/ndx-pose.extensions.yaml
@@ -23,6 +23,10 @@ groups:
     doc: Array of pairs of indices corresponding to edges between nodes. Index values
       correspond to row indices of the 'nodes' dataset. Index values use 0-indexing.
     quantity: '?'
+  links:
+  - target_type: Subject
+    doc: The Subject object in the NWB file, if this Skeleton corresponds to the Subject.
+    quantity: '?'
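+    # usage note (illustrative): in the Python API, this optional link is populated by
+    # passing subject=... to the Skeleton constructor, as in the tests below.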
- neurodata_type_def: PoseEstimationSeries neurodata_type_inc: SpatialSeries doc: Estimated position (x, y) or (x, y, z) of a body part over time. @@ -220,7 +224,7 @@ groups: quantity: '*' - neurodata_type_def: Skeletons neurodata_type_inc: NWBDataInterface - default_name: skeletons + default_name: Skeletons doc: Organizational group to hold skeletons. groups: - neurodata_type_inc: Skeleton @@ -229,12 +233,9 @@ groups: - neurodata_type_def: PoseTraining neurodata_type_inc: NWBDataInterface default_name: PoseTraining - doc: Group that holds images, ground-truth annotations, and metadata for training - a pose estimator. + doc: Group that holds source videos and ground-truth annotations for training a + pose estimator. groups: - - name: skeletons - neurodata_type_inc: Skeletons - doc: Organizational group to hold skeletons. - name: training_frames neurodata_type_inc: TrainingFrames doc: Organizational group to hold training frames. diff --git a/src/pynwb/ndx_pose/testing/mock/pose.py b/src/pynwb/ndx_pose/testing/mock/pose.py index 2b8ebf6..ab97de5 100644 --- a/src/pynwb/ndx_pose/testing/mock/pose.py +++ b/src/pynwb/ndx_pose/testing/mock/pose.py @@ -14,7 +14,6 @@ SkeletonInstances, TrainingFrame, Skeletons, - PoseTraining, ) @@ -118,7 +117,6 @@ def mock_PoseEstimation( if nwbfile is not None: skeletons = Skeletons(skeletons=[skeleton]) - pose_training = PoseTraining(skeletons=skeletons) if "behavior" not in nwbfile.processing: behavior_pm = nwbfile.create_processing_module( @@ -127,7 +125,7 @@ def mock_PoseEstimation( else: behavior_pm = nwbfile.processing["behavior"] behavior_pm.add(pe) - behavior_pm.add(pose_training) + behavior_pm.add(skeletons) return pe diff --git a/src/pynwb/tests/integration/hdf5/test_pose.py b/src/pynwb/tests/integration/hdf5/test_pose.py index 7840814..fd41f1d 100644 --- a/src/pynwb/tests/integration/hdf5/test_pose.py +++ b/src/pynwb/tests/integration/hdf5/test_pose.py @@ -167,6 +167,8 @@ def test_roundtrip(self): Add a PoseEstimation to an NWBFile, write it, read it, and test that the read object matches the original. 
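+        The Skeletons container holding the skeleton is stored in the behavior processing
+        module alongside the PoseEstimation.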
""" skeleton = mock_Skeleton() + skeletons = Skeletons(skeletons=[skeleton]) + pose_estimation_series = [ mock_PoseEstimationSeries(name=name) for name in skeleton.nodes ] @@ -183,14 +185,11 @@ def test_roundtrip(self): skeleton=skeleton, ) - skeletons = Skeletons(skeletons=[skeleton]) - pose_training = PoseTraining(skeletons=skeletons) - behavior_pm = self.nwbfile.create_processing_module( name="behavior", description="processed behavioral data" ) behavior_pm.add(pe) - behavior_pm.add(pose_training) + behavior_pm.add(skeletons) with NWBHDF5IO(self.path, mode="w") as io: io.write(self.nwbfile) @@ -240,6 +239,8 @@ def addContainer(self): """Add the test PoseTraining to the given NWBFile""" skeleton1 = mock_Skeleton(name="subject1") skeleton2 = mock_Skeleton(name="subject2") + skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) + source_video = mock_source_video(name="source_video") sk1_instance10 = mock_SkeletonInstance(id=np.uint(10), skeleton=skeleton1) sk1_instance11 = mock_SkeletonInstance(id=np.uint(11), skeleton=skeleton1) @@ -265,14 +266,12 @@ def addContainer(self): source_video_frame_index=np.uint(10), ) - skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) training_frames = TrainingFrames( training_frames=[sk1_training_frame, sk2_training_frame] ) source_videos = SourceVideos(image_series=[source_video]) pose_training = PoseTraining( - skeletons=skeletons, training_frames=training_frames, source_videos=source_videos, ) @@ -281,6 +280,7 @@ def addContainer(self): name="behavior", description="processed behavioral data", ) + behavior_pm.add(skeletons) behavior_pm.add(pose_training) def getContainer(self, nwbfile: NWBFile): diff --git a/src/pynwb/tests/test_example_usage.py b/src/pynwb/tests/test_example_usage.py index 0863797..6a4a175 100644 --- a/src/pynwb/tests/test_example_usage.py +++ b/src/pynwb/tests/test_example_usage.py @@ -1,23 +1,17 @@ -"""An example of how to use the ndx-pose extension, packaged as a test so that it is run by pytest.""" +"""Examples of how to use the ndx-pose extension from README.md, packaged as a test so that it is run by pytest.""" -def test_example_usage(): +def test_example_usage_estimates_only(): import datetime import numpy as np from pynwb import NWBFile, NWBHDF5IO + from pynwb.file import Subject from ndx_pose import ( PoseEstimationSeries, PoseEstimation, Skeleton, - SkeletonInstance, - TrainingFrame, - PoseTraining, Skeletons, - TrainingFrames, - SourceVideos, - SkeletonInstances, ) - from pynwb.image import ImageSeries # initialize an NWBFile object nwbfile = NWBFile( @@ -26,6 +20,24 @@ def test_example_usage(): session_start_time=datetime.datetime.now(datetime.timezone.utc), ) + # add a subject to the NWB file + subject = Subject(subject_id="subject1", species="Mus musculus") + nwbfile.subject = subject + + # create a skeleton that define the relationship between the markers. also link this skeleton to the subject. + skeleton = Skeleton( + name="subject1_skeleton", + nodes=["front_left_paw", "body", "front_right_paw"], + # define edges between nodes using the indices of the nodes in the node list. + # this array represents an edge between front left paw and body, and an edge between body and front right paw. + edges=np.array([[0, 1], [1, 2]], dtype="uint8"), + subject=subject, + ) + + # store the skeleton into a Skeletons container object. 
+    # (this is more useful if you have multiple skeletons in your training data)
+    skeletons = Skeletons(skeletons=[skeleton])
+
     # create a device for the camera
     camera1 = nwbfile.create_device(
         name="camera1",
@@ -39,8 +51,6 @@ def test_example_usage():
     timestamps = np.linspace(0, 10, num=100)  # a timestamp for every frame
     confidence = np.random.rand(100)  # a confidence value for every frame
     reference_frame = "(0,0,0) corresponds to ..."
-
-    # note the double underscore in "confidence__definition" because this is a property of the "confidence" field
     confidence_definition = "Softmax output of the deep neural network."
 
     front_left_paw = PoseEstimationSeries(
@@ -67,7 +77,7 @@ def test_example_usage():
         confidence_definition=confidence_definition,
     )
 
-    data = np.random.rand(100, 2)  # num_frames x (x, y) but can be (x, y, z)
+    data = np.random.rand(100, 2)  # num_frames x (x, y) but can be num_frames x (x, y, z)
     confidence = np.random.rand(100)  # a confidence value for every frame
     front_right_paw = PoseEstimationSeries(
         name="front_right_paw",
@@ -83,21 +93,161 @@ def test_example_usage():
     # store all PoseEstimationSeries in a list
     pose_estimation_series = [front_left_paw, body, front_right_paw]
 
-    # create a skeleton that defines the relationship between the markers
-    skeleton = Skeleton(
-        name="subject1",
+    # create a PoseEstimation object that represents the estimated positions of each node, references
+    # the original video and labeled video files, and provides metadata on how these estimates were generated.
+    # multiple videos and cameras can be referenced.
+    pose_estimation = PoseEstimation(
+        name="PoseEstimation",
+        pose_estimation_series=pose_estimation_series,
+        description="Estimated positions of front paws of subject1 using DeepLabCut.",
+        original_videos=["path/to/camera1.mp4"],
+        labeled_videos=["path/to/camera1_labeled.mp4"],
+        dimensions=np.array(
+            [[640, 480]], dtype="uint16"
+        ),  # pixel dimensions of the video
+        devices=[camera1],
+        scorer="DLC_resnet50_openfieldOct30shuffle1_1600",
+        source_software="DeepLabCut",
+        source_software_version="2.3.8",
+        skeleton=skeleton,  # link to the skeleton object
+    )
+
+    # create a "behavior" processing module to store the PoseEstimation and Skeletons objects
+    behavior_pm = nwbfile.create_processing_module(
+        name="behavior",
+        description="processed behavioral data",
+    )
+    behavior_pm.add(skeletons)
+    behavior_pm.add(pose_estimation)
+
+    # write the NWBFile to disk
+    path = "test_pose.nwb"
+    with NWBHDF5IO(path, mode="w") as io:
+        io.write(nwbfile)
+
+    # read the NWBFile from disk and print out the PoseEstimation and Skeleton objects
+    with NWBHDF5IO(path, mode="r") as io:
+        read_nwbfile = io.read()
+        print(read_nwbfile.processing["behavior"]["PoseEstimation"])
+        print(read_nwbfile.processing["behavior"]["Skeletons"]["subject1_skeleton"])
+
+
+def test_example_usage_training():
+    import datetime
+    import numpy as np
+    from pynwb import NWBFile, NWBHDF5IO
+    from pynwb.file import Subject
+    from pynwb.image import ImageSeries
+    from ndx_pose import (
+        PoseEstimationSeries,
+        PoseEstimation,
+        Skeleton,
+        SkeletonInstance,
+        TrainingFrame,
+        PoseTraining,
+        Skeletons,
+        TrainingFrames,
+        SourceVideos,
+        SkeletonInstances,
+    )
+
+    # initialize an NWBFile object
+    nwbfile = NWBFile(
+        session_description="session_description",
+        identifier="identifier",
+        session_start_time=datetime.datetime.now(datetime.timezone.utc),
+    )
+
+    # add a subject to the NWB file
+    subject = Subject(subject_id="subject1",
species="Mus musculus") + nwbfile.subject = subject + + # in this example, we have two subjects in the training data and therefore two skeletons. + # each skeleton defines the relationship between the markers. + # Skeleton names must be unique because the Skeleton objects will be added to a Skeletons container object + # which requires unique names. + skeleton1 = Skeleton( + name="subject1_skeleton", nodes=["front_left_paw", "body", "front_right_paw"], # edge between front left paw and body, edge between body and front right paw. # the values are the indices of the nodes in the nodes list. edges=np.array([[0, 1], [1, 2]], dtype="uint8"), ) + skeleton2 = Skeleton( + name="subject2_skeleton", + nodes=["front_left_paw", "body", "front_right_paw"], + # edge between front left paw and body, edge between body and front right paw. + # the values are the indices of the nodes in the nodes list. + edges=np.array([[0, 1], [1, 2]], dtype="uint8"), + ) + + skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) + + # create a device for the camera + camera1 = nwbfile.create_device( + name="camera1", + description="camera for recording behavior", + manufacturer="my manufacturer", + ) + + # a PoseEstimationSeries represents the estimated position of a single marker. + # in this example, we have three PoseEstimationSeries: one for the body and one for each front paw. + # a single NWB file contains pose estimation data for a single subject. if you have pose estimates for + # multiple subjects, store them in separate files. + data = np.random.rand(100, 2) # num_frames x (x, y) but can be num_frames x (x, y, z) + timestamps = np.linspace(0, 10, num=100) # a timestamp for every frame + confidence = np.random.rand(100) # a confidence value for every frame + reference_frame = "(0,0,0) corresponds to ..." + confidence_definition = "Softmax output of the deep neural network." + + front_left_paw = PoseEstimationSeries( + name="front_left_paw", + description="Marker placed around fingers of front left paw.", + data=data, + unit="pixels", + reference_frame=reference_frame, + timestamps=timestamps, + confidence=confidence, + confidence_definition=confidence_definition, + ) - # create a PoseEstimation object that represents the estimated positions of the front paws and body - # from DLC and references the original video simultaneously recorded from one camera and the labeled - # video that was generated by DLC. multiple videos and cameras can be referenced. 
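+
+    # note on timestamps below: only front_left_paw stores an explicit timestamps array.
+    # the body and front_right_paw series pass the front_left_paw series as their
+    # timestamps argument, which stores a link to that series' timestamps rather than
+    # duplicating the data (standard NWB TimeSeries behavior).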
+ data = np.random.rand(100, 2) # num_frames x (x, y) but can be (x, y, z) + confidence = np.random.rand(100) # a confidence value for every frame + body = PoseEstimationSeries( + name="body", + description="Marker placed on center of body.", + data=data, + unit="pixels", + reference_frame=reference_frame, + timestamps=front_left_paw, # link to timestamps of front_left_paw so we don't have to duplicate them + confidence=confidence, + confidence_definition=confidence_definition, + ) + + data = np.random.rand(100, 2) # num_frames x (x, y) but can be (x, y, z) + confidence = np.random.rand(100) # a confidence value for every frame + front_right_paw = PoseEstimationSeries( + name="front_right_paw", + description="Marker placed around fingers of front right paw.", + data=data, + unit="pixels", + reference_frame=reference_frame, + timestamps=front_left_paw, # link to timestamps of front_left_paw so we don't have to duplicate them + confidence=confidence, + confidence_definition=confidence_definition, + ) + + # store all PoseEstimationSeries in a list + pose_estimation_series = [front_left_paw, body, front_right_paw] + + # create a PoseEstimation object that represents the estimated positions of each node, references + # the original video and labeled video files, and provides metadata on how these estimates were generated. + # multiple videos and cameras can be referenced. pose_estimation = PoseEstimation( + name="PoseEstimation", pose_estimation_series=pose_estimation_series, - description="Estimated positions of front paws using DeepLabCut.", + description="Estimated positions of front paws of subject1 using DeepLabCut.", original_videos=["path/to/camera1.mp4"], labeled_videos=["path/to/camera1_labeled.mp4"], dimensions=np.array( @@ -107,14 +257,16 @@ def test_example_usage(): scorer="DLC_resnet50_openfieldOct30shuffle1_1600", source_software="DeepLabCut", source_software_version="2.3.8", - skeleton=skeleton, + skeleton=skeleton1, # link to the skeleton ) # next, we specify the ground truth data that was used to train the pose estimation model. # this includes the training video and the ground truth annotations for each frame. - # this is optional. if you don't have ground truth data, you can skip this step. - # create an ImageSeries that represents the raw video that was used to train the pose estimation model + # create an ImageSeries that represents the raw video that was used to train the pose estimation model. + # the video can be stored as an MP4 file that is linked to from this ImageSeries object. + # if there are multiple videos, the names must be unique because they will be added to a SourceVideos + # container object which requires unique names. training_video1 = ImageSeries( name="source_video", description="Training video used to train the pose estimation model.", @@ -126,63 +278,69 @@ def test_example_usage(): rate=30.0, ) - # create 50 ground truth instances of the skeleton at slightly random positions. - # in this example, each node is visible on every frame. - # the mapping of index in node_locations and node_visibilities to label is defined by the skeleton. - - # the node locations are the (x, y) coordinates of each node in the skeleton. + # initial locations ((x, y) coordinates) of each node in the skeleton. # the order of the nodes is defined by the skeleton. 
-    node_locations = np.array(
+    node_locations_sk1 = np.array(
         [
             [10.0, 10.0],  # front_left_paw
             [20.0, 20.0],  # body
             [30.0, 10.0],  # front_right_paw
         ]
     )
+    node_locations_sk2 = np.array(
+        [
+            [40.0, 40.0],  # front_left_paw
+            [50.0, 50.0],  # body
+            [60.0, 60.0],  # front_right_paw
+        ]
+    )
 
-    # create 50 training frames using the training video and the skeleton instances.
-    # the skeleton instances start with video frame 0 and end with video frame 49.
+    # in this example, frame indices 0, 5, 10, ..., 495 from the training video were used for training.
+    # each training frame has two skeleton instances, one for each skeleton.
     training_frames_list = []
-    for i in range(50):
+    for i in range(0, 500, 5):
         skeleton_instances_list = []
-        # add some noise to the node locations from the location on the previous frame
-        node_locations = node_locations + np.random.rand(3, 2)
-        instance_1 = SkeletonInstance(
-            name=f"skeleton1_instance{i}",
+        # add some noise to the node locations from the previous frame
+        node_locations_sk1 = node_locations_sk1 + np.random.rand(3, 2)
+        instance_sk1 = SkeletonInstance(
+            name="skeleton1_instance",
             id=np.uint(i),
-            node_locations=node_locations,
+            node_locations=node_locations_sk1,
             node_visibility=[
                 True,  # front_left_paw
                 True,  # body
                 True,  # front_right_paw
             ],
-            skeleton=skeleton,  # link to the skeleton
+            skeleton=skeleton1,  # link to the skeleton
         )
-        skeleton_instances_list.append(instance_1)
+        skeleton_instances_list.append(instance_sk1)
 
-        # add some noise to the node locations from the location on the previous frame
-        node_locations = node_locations + np.random.rand(3, 2)
-        instance_2 = SkeletonInstance(
-            name=f"skeleton2_instance{i}",
+        # add some noise to the node locations from the previous frame
+        node_locations_sk2 = node_locations_sk2 + np.random.rand(3, 2)
+        instance_sk2 = SkeletonInstance(
+            name="skeleton2_instance",
             id=np.uint(i),
-            node_locations=node_locations,
+            node_locations=node_locations_sk2,
             node_visibility=[
                 True,  # front_left_paw
                 True,  # body
                 True,  # front_right_paw
             ],
-            skeleton=skeleton,  # link to the skeleton
+            skeleton=skeleton2,  # link to the skeleton
         )
-        skeleton_instances_list.append(instance_2)
+        skeleton_instances_list.append(instance_sk2)
 
         # store the skeleton instances in a SkeletonInstances object
         skeleton_instances = SkeletonInstances(
             skeleton_instances=skeleton_instances_list
         )
 
-        # names must be unique within a PoseTraining object (we will add them to a PoseTraining object below)
+        # TrainingFrame names must be unique because the TrainingFrame objects will be added to a
+        # TrainingFrames container object which requires unique names.
+        # the source video frame index is the index of the frame in the source video, which is useful
+        # for linking the training frames to the source video.
training_frame = TrainingFrame( - name="frame_{}".format(i), + name=f"frame_{i}", annotator="Bilbo Baggins", skeleton_instances=skeleton_instances, source_video=training_video1, @@ -190,14 +348,12 @@ def test_example_usage(): ) training_frames_list.append(training_frame) - # store the skeletons, training frames, and source videos in their corresponding grouping objects - skeletons = Skeletons(skeletons=[skeleton]) + # store the training frames and source videos in their corresponding container objects training_frames = TrainingFrames(training_frames=training_frames_list) source_videos = SourceVideos(image_series=[training_video1]) # store the skeletons group, training frames group, and source videos group in a PoseTraining object pose_training = PoseTraining( - skeletons=skeletons, training_frames=training_frames, source_videos=source_videos, ) @@ -207,6 +363,7 @@ def test_example_usage(): name="behavior", description="processed behavioral data", ) + behavior_pm.add(skeletons) behavior_pm.add(pose_estimation) behavior_pm.add(pose_training) @@ -217,10 +374,9 @@ def test_example_usage(): # read the NWBFile from disk and print out the PoseEstimation and PoseTraining objects # as well as the first training frame - with NWBHDF5IO(path, mode="r", load_namespaces=True) as io: + with NWBHDF5IO(path, mode="r") as io: read_nwbfile = io.read() - read_pe = read_nwbfile.processing["behavior"]["PoseEstimation"] - print(read_pe) + print(read_nwbfile.processing["behavior"]["PoseEstimation"]) + print(read_nwbfile.processing["behavior"]["Skeletons"]["subject1_skeleton"]) read_pt = read_nwbfile.processing["behavior"]["PoseTraining"] - print(read_pt) - print(read_pt.training_frames["frame_0"]) + print(read_pt.training_frames["frame_10"].skeleton_instances["skeleton2_instance"].node_locations[:]) diff --git a/src/pynwb/tests/unit/test_pose.py b/src/pynwb/tests/unit/test_pose.py index 2082527..b6e3b0a 100644 --- a/src/pynwb/tests/unit/test_pose.py +++ b/src/pynwb/tests/unit/test_pose.py @@ -5,6 +5,7 @@ from pynwb.device import Device from pynwb.testing import TestCase from pynwb.image import RGBImage +from pynwb.file import Subject from ndx_pose import ( PoseEstimationSeries, @@ -63,18 +64,30 @@ def test_constructor(self): class TestSkeleton(TestCase): def test_init(self): + subject = Subject(subject_id="MOUSE001", species="Mus musculus") skeleton = Skeleton( name="subject1", nodes=["front_left_paw", "body", "front_right_paw"], # edge between front left paw and body, edge between body and front right paw. # the values are the indices of the nodes in the nodes list. 
edges=np.array([[0, 1], [1, 2]], dtype="uint8"), + subject=subject, + ) self.assertEqual(skeleton.name, "subject1") self.assertEqual(skeleton.nodes, ["front_left_paw", "body", "front_right_paw"]) np.testing.assert_array_equal( skeleton.edges, np.array([[0, 1], [1, 2]], dtype="uint8") ) + self.assertIs(skeleton.subject, subject) + + def test_init_no_subject(self): + skeleton = Skeleton( + name="subject1", + nodes=["front_left_paw", "body", "front_right_paw"], + edges=np.array([[0, 1], [1, 2]], dtype="uint8"), + ) + self.assertIsNone(skeleton.subject) class TestPoseEstimationConstructor(TestCase): @@ -327,20 +340,15 @@ def test_constructor(self): source_video_frame_index=np.uint(10), ) - skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) training_frames = TrainingFrames( training_frames=[sk1_training_frame, sk2_training_frame] ) source_videos = SourceVideos(image_series=[source_video]) pose_training = PoseTraining( - skeletons=skeletons, training_frames=training_frames, source_videos=source_videos, ) - self.assertEqual(len(pose_training.skeletons.skeletons), 2) - self.assertIs(pose_training.skeletons.skeletons["subject1"], skeleton1) - self.assertIs(pose_training.skeletons.skeletons["subject2"], skeleton2) self.assertEqual(len(pose_training.training_frames.training_frames), 2) self.assertIs( pose_training.training_frames.training_frames["skeleton1_frame10"], @@ -360,6 +368,7 @@ class TestPoseTrainingImages(TestCase): def test_constructor(self): skeleton1 = mock_Skeleton(name="subject1") skeleton2 = mock_Skeleton(name="subject2") + source_frame_10 = mock_source_frame(name="source_frame_10") sk1_instance10 = mock_SkeletonInstance(id=np.uint(10), skeleton=skeleton1) sk1_instance11 = mock_SkeletonInstance(id=np.uint(11), skeleton=skeleton1) @@ -388,18 +397,13 @@ def test_constructor(self): source_video_frame_index=np.uint(11), ) - skeletons = Skeletons(skeletons=[skeleton1, skeleton2]) training_frames = TrainingFrames( training_frames=[sk1_training_frame, sk2_training_frame] ) pose_training = PoseTraining( - skeletons=skeletons, training_frames=training_frames, ) - self.assertEqual(len(pose_training.skeletons.skeletons), 2) - self.assertIs(pose_training.skeletons.skeletons["subject1"], skeleton1) - self.assertIs(pose_training.skeletons.skeletons["subject2"], skeleton2) self.assertEqual(len(pose_training.training_frames.training_frames), 2) self.assertIs( pose_training.training_frames.training_frames["frame10"], sk1_training_frame diff --git a/src/spec/create_extension_spec.py b/src/spec/create_extension_spec.py index 37417b3..bf3b0ff 100644 --- a/src/spec/create_extension_spec.py +++ b/src/spec/create_extension_spec.py @@ -72,14 +72,20 @@ def main(): quantity="?", ), ], + links=[ + NWBLinkSpec( + doc="The Subject object in the NWB file, if this Skeleton corresponds to the Subject.", + target_type="Subject", + quantity="?", + ), + ], ) skeletons = NWBGroupSpec( neurodata_type_def="Skeletons", neurodata_type_inc="NWBDataInterface", doc="Organizational group to hold skeletons.", - # this is meant to be used in a PoseTraining object which will enforce this name - default_name="skeletons", + default_name="Skeletons", groups=[ NWBGroupSpec( neurodata_type_inc="Skeleton", @@ -365,14 +371,9 @@ def main(): pose_training = NWBGroupSpec( neurodata_type_def="PoseTraining", neurodata_type_inc="NWBDataInterface", - doc="Group that holds images, ground-truth annotations, and metadata for training a pose estimator.", + doc="Group that holds source videos and ground-truth annotations for training a pose 
estimator.", default_name="PoseTraining", groups=[ - NWBGroupSpec( - name="skeletons", - neurodata_type_inc="Skeletons", - doc="Organizational group to hold skeletons.", - ), NWBGroupSpec( name="training_frames", neurodata_type_inc="TrainingFrames", From cb18c887a0a99d018f7d95410181a83a39d2217e Mon Sep 17 00:00:00 2001 From: rly Date: Wed, 13 Mar 2024 18:15:37 -0700 Subject: [PATCH 6/6] MInor fixes to spec and mock functions --- spec/ndx-pose.extensions.yaml | 6 +++--- src/pynwb/ndx_pose/testing/mock/pose.py | 26 +++++++++++-------------- src/spec/create_extension_spec.py | 5 ++--- 3 files changed, 16 insertions(+), 21 deletions(-) diff --git a/spec/ndx-pose.extensions.yaml b/spec/ndx-pose.extensions.yaml index dfd335b..992cba4 100644 --- a/spec/ndx-pose.extensions.yaml +++ b/spec/ndx-pose.extensions.yaml @@ -163,8 +163,8 @@ groups: - neurodata_type_def: SkeletonInstance neurodata_type_inc: NWBDataInterface default_name: skeleton_instance - doc: 'Group that holds ground-truth pose data for a single instance of a skeleton - in a single frame. ' + doc: Group that holds ground-truth pose data for a single instance of a skeleton + in a single frame. attributes: - name: id dtype: uint8 @@ -207,7 +207,7 @@ groups: - neurodata_type_def: SkeletonInstances neurodata_type_inc: NWBDataInterface default_name: skeleton_instances - doc: Organizational group to hold skeleton instances.This is meant to be used within + doc: Organizational group to hold skeleton instances. This is meant to be used within a TrainingFrame. groups: - neurodata_type_inc: SkeletonInstance diff --git a/src/pynwb/ndx_pose/testing/mock/pose.py b/src/pynwb/ndx_pose/testing/mock/pose.py index ab97de5..6c7c6bb 100644 --- a/src/pynwb/ndx_pose/testing/mock/pose.py +++ b/src/pynwb/ndx_pose/testing/mock/pose.py @@ -1,4 +1,4 @@ -from typing import Optional, Any +from typing import Optional, Any, Union import numpy as np from pynwb import NWBFile @@ -135,8 +135,8 @@ def mock_SkeletonInstance( name: Optional[str] = None, id: Optional[np.uint] = np.uint(10), node_locations: Optional[Any] = None, - node_visibility: list = None, - skeleton: Skeleton = None, + node_visibility: Optional[list] = None, + skeleton: Optional[Skeleton] = None, ): if node_locations is None and node_visibility is None: num_nodes = 3 @@ -159,6 +159,7 @@ def mock_SkeletonInstance( name = skeleton.name + "_instance_" + str(id) if node_visibility is None: node_visibility = np.ones(num_nodes, dtype="bool") + skeleton_instance = SkeletonInstance( name=name, id=id, @@ -170,7 +171,9 @@ def mock_SkeletonInstance( return skeleton_instance -def mock_SkeletonInstances(skeleton_instances=None): +def mock_SkeletonInstances( + skeleton_instances: Union[SkeletonInstance, list[SkeletonInstance]] = None +): if skeleton_instances is None: skeleton_instances = [mock_SkeletonInstance()] if not isinstance(skeleton_instances, list): @@ -202,21 +205,14 @@ def mock_source_frame( ): return RGBImage(name=name, data=np.random.rand(640, 480, 3).astype("uint8")) -def mock_source_frame( - *, - name: Optional[str] = None, -): - return RGBImage(name=name, data=np.random.rand(640, 480, 3).astype("uint8")) - - def mock_TrainingFrame( *, name: Optional[str] = None, annotator: Optional[str] = "Awesome Possum", - skeleton_instances: SkeletonInstances = None, - source_video: ImageSeries = None, - source_frame: Image = None, - source_video_frame_index: np.uint = np.uint(10), + skeleton_instances: Optional[SkeletonInstances] = None, + source_video: Optional[ImageSeries] = None, + source_frame: Optional[Image] 
= None, + source_video_frame_index: Optional[np.uint] = np.uint(10), ): training_frame = TrainingFrame( name=name or name_generator("TrainingFrame"), diff --git a/src/spec/create_extension_spec.py b/src/spec/create_extension_spec.py index bf3b0ff..39bbe88 100644 --- a/src/spec/create_extension_spec.py +++ b/src/spec/create_extension_spec.py @@ -226,7 +226,7 @@ def main(): neurodata_type_def="SkeletonInstance", neurodata_type_inc="NWBDataInterface", doc=( - "Group that holds ground-truth pose data for a single instance of a skeleton in a single frame. " + "Group that holds ground-truth pose data for a single instance of a skeleton in a single frame." ), default_name="skeleton_instance", links=[ @@ -272,8 +272,7 @@ def main(): neurodata_type_def="SkeletonInstances", neurodata_type_inc="NWBDataInterface", doc=( - "Organizational group to hold skeleton instances." - "This is meant to be used within a TrainingFrame." + "Organizational group to hold skeleton instances. This is meant to be used within a TrainingFrame." ), default_name="skeleton_instances", groups=[