From aa8fc54e55dd1e2d575d392dd4a73e661bfee478 Mon Sep 17 00:00:00 2001 From: Ariel Rokem Date: Fri, 8 Sep 2023 15:54:16 -0700 Subject: [PATCH 01/12] WIP: Example visually showing the stages of tractometry. This is potentially useful as an explanatory example. I am also using this to create new figures to explain the process in talks/etc. --- .../plot_stages_of_tractometry.py | 447 ++++++++++++++++++ examples/howto_examples/plot_viz.py | 12 +- 2 files changed, 455 insertions(+), 4 deletions(-) create mode 100644 examples/howto_examples/plot_stages_of_tractometry.py diff --git a/examples/howto_examples/plot_stages_of_tractometry.py b/examples/howto_examples/plot_stages_of_tractometry.py new file mode 100644 index 000000000..e1ca6ed17 --- /dev/null +++ b/examples/howto_examples/plot_stages_of_tractometry.py @@ -0,0 +1,447 @@ +""" +============================ +The stages of tractometry +============================ + +This example visualizes the different stages of tractometry, from the +preprocessed diffusion data to the final tract profiles. We will use the Fury +library to visualize the results of pyAFQ in high-quality, publication-ready +figures. + +""" + +############################################################################## +# In this set of examples, we will use the `fury `_ +# library to visualize outputs of pyAFQ as publication-ready figures. + +import os +import os.path as op +import nibabel as nib +import numpy as np + +from dipy.io.streamline import load_trk +from dipy.tracking.streamline import transform_streamlines, set_number_of_points +from dipy.core.gradients import gradient_table +from dipy.align import resample + +from fury import actor, window +from fury.actor import colormap_lookup_table +from fury.colormap import create_colormap +from matplotlib.cm import tab20 + +import AFQ.data.fetch as afd +from AFQ.viz.utils import gen_color_dict + +############################################################################# +# +# .. note:: +# A virtual frame buffer is needed if you are running this example on +# a machine that is not connected to a display ("headless"). If this is +# the case, you can either run this example with the environment variable +# "XVFB" set to "1" or "True" or you can remove the if statement below, +# which will start a virtual frame buffer for you. + +if os.environ.get("XVFB", False): + print("Initializing XVFB") + import xvfbwrapper + from xvfbwrapper import Xvfb + + vdisplay = Xvfb() + vdisplay.start() + +############################################################################### +# Get some data from HBN POD2 +# ---------------------------- +# The Healthy Brain Network Preprocessed Open Diffusion Derivatives (HBN POD2) +# is a collection of resources based on the Healthy Brain Network dataset +# [1, 2]_. HBN POD2 includes data derivatives - including pyAFQ derivatives - +# from more than 2,000 subjects. The data and the derivatives can be browsed at +# https://fcp-indi.s3.amazonaws.com/index.html#data/Projects/HBN/BIDS_curated/ +# +# Here, we will visualize the results from one subject, together with their +# anatomy and using several variations. 
We start by downloading their
# pyAFQ-processed data using fetcher functions that download both the
# preprocessed data, as well as the pyAFQ-processed data (note that this
# will take up about 1.75 GB of disk space):

afd.fetch_hbn_preproc(["NDARAA948VFH"])
study_path = afd.fetch_hbn_afq(["NDARAA948VFH"])[1]

#############################################################################
# Visualize the processed dMRI data
# ---------------------------------
# The HBN POD2 dataset was processed using the ``qsiprep`` pipeline. The
# results from this processing are stored in a sub-folder of the derivatives
# folder within the study folder.
# Here, we will start by visualizing the diffusion data. We read in the
# diffusion data, as well as the gradient table, using the `nibabel` library.
# We then extract the b0, b1000, and b2000 volumes from the diffusion data.
# We will use the `actor.slicer` function from `fury` to visualize these. This
# function takes a 3D volume as input and returns a `slicer` actor, which can
# then be added to a `window.Scene` object. We create a helper function that
# will create a slicer actor for a given volume and a given slice along the x,
# y, or z dimension. We then call this function three times, once for each of
# the b0, b1000, and b2000 volumes, and add the resulting slicer actors to a
# scene. We set the camera on the scene to a view that we like, and then we
# record the scene into a PNG file. We do this for each of the three volumes.

deriv_path = op.join(
    study_path, "derivatives")

qsiprep_path = op.join(
    deriv_path,
    'qsiprep',
    'sub-NDARAA948VFH',
    'ses-HBNsiteRU')

dmri_img = nib.load(op.join(
    qsiprep_path,
    'dwi',
    'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi.nii.gz'))

gtab = gradient_table(*[op.join(
    qsiprep_path,
    'dwi',
    f'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi.{ext}') for ext in ['bval', 'bvec']])


dmri_data = dmri_img.get_fdata()

dmri_b0 = dmri_data[..., 0]
dmri_b1000 = dmri_data[..., 1]
dmri_b2000 = dmri_data[..., 65]


def slice_volume(data, x=None, y=None, z=None):
    slicer_actors = []
    slicer_actor_z = actor.slicer(data)
    if z is not None:
        slicer_actor_z.display_extent(
            0, data.shape[0] - 1,
            0, data.shape[1] - 1,
            z, z)
        slicer_actors.append(slicer_actor_z)
    if y is not None:
        slicer_actor_y = slicer_actor_z.copy()
        slicer_actor_y.display_extent(
            0, data.shape[0] - 1,
            y, y,
            0, data.shape[2] - 1)
        slicer_actors.append(slicer_actor_y)
    if x is not None:
        slicer_actor_x = slicer_actor_z.copy()
        slicer_actor_x.display_extent(
            x, x,
            0, data.shape[1] - 1,
            0, data.shape[2] - 1)
        slicer_actors.append(slicer_actor_x)

    return slicer_actors


slicers_b0 = slice_volume(dmri_b0, x=dmri_b0.shape[0] // 2, z=dmri_b0.shape[-1] // 3)
slicers_b1000 = slice_volume(dmri_b1000, x=dmri_b0.shape[0] // 2, z=dmri_b0.shape[-1] // 3)
slicers_b2000 = slice_volume(dmri_b2000, x=dmri_b0.shape[0] // 2, z=dmri_b0.shape[-1] // 3)

for bval, slicers in zip([0, 1000, 2000], [slicers_b0, slicers_b1000, slicers_b2000]):
    scene = window.Scene()
    for slicer in slicers:
        scene.add(slicer)
    scene.set_camera(
        position=(238.04, 174.48, 143.04),
        focal_point=(96.32, 110.34, 84.48),
        view_up=(-0.33, -0.12, 0.94))

    scene.background((1, 1, 1))
    window.record(scene, out_path=f'b{bval}.png', size=(2400, 2400))


#############################################################################
# Visualizing whole-brain
tractography +# ------------------------------------ +# One of the first steps of the pyAFQ pipeline is to generate whole-brain +# tractography. We will visualize the results of this step. We start by reading +# in the FA image, which is used as a reference for the tractography. We then + +afq_path = op.join( + deriv_path, + 'afq', + 'sub-NDARAA948VFH', + 'ses-HBNsiteRU') + +fa_img = nib.load(op.join(afq_path, + 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_model-DKI_FA.nii.gz')) + + +sft_whole_brain = load_trk(op.join(afq_path, + 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-probCSD_tractography.trk'), fa_img) + + +############################################################################# +# Transform into the T1w reference frame +# -------------------------------------- +# Next, we would like to visualize the whole-brain tractography in the context +# of the T1w measurement. We read in this data and transform the bundle +# coordinates, first into the RASMM common coordinate frame and then +# subsequently into the coordinate frame of the T1-weighted data (if you find +# this confusing, you can brush up on this topic in the +# `nibabel documentation `_). + + +t1w_img = nib.load(op.join(deriv_path, + 'qsiprep/sub-NDARAA948VFH/anat/sub-NDARAA948VFH_desc-preproc_T1w.nii.gz')) +t1w = t1w_img.get_fdata() +sft_whole_brain.to_rasmm() +whole_brain_t1w = transform_streamlines( + sft_whole_brain.streamlines, + np.linalg.inv(t1w_img.affine)) + + +def lines_as_tubes(sl, line_width, **kwargs): + line_actor = actor.line(sl, **kwargs) + line_actor.GetProperty().SetRenderLinesAsTubes(1) + line_actor.GetProperty().SetLineWidth(line_width) + return line_actor + + +whole_brain_actor = lines_as_tubes(whole_brain_t1w, 2) +slicers = slice_volume(t1w, x=t1w.shape[0] // 2, z=t1w.shape[-1] // 3) + +scene = window.Scene() + +scene.add(whole_brain_actor) +for slicer in slicers: + scene.add(slicer) + +scene.set_camera(position=(238.04, 174.48, 143.04), + focal_point=(96.32, 110.34, 84.48), + view_up=(-0.33, -0.12, 0.94)) + +scene.background((1, 1, 1)) +window.record(scene, out_path='whole_brain.png', size=(2400, 2400)) + +############################################################################# +# Whole brain with waypoints +# -------------------------------------- +# + + +scene.clear() +whole_brain_actor = lines_as_tubes(whole_brain_t1w, 2) +slicers = slice_volume(t1w, x=t1w.shape[0] // 2, z=t1w.shape[-1] // 3) + +scene.add(whole_brain_actor) +for slicer in slicers: + scene.add(slicer) + +scene.background((1, 1, 1)) + +waypoint1 = nib.load( + op.join( + afq_path, + "ROIs", "sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_desc-ROI-ARC_L-1-include.nii.gz")) + +waypoint2 = nib.load( + op.join( + afq_path, + "ROIs", "sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_desc-ROI-ARC_L-2-include.nii.gz")) + +waypoint1_xform = resample(waypoint1, t1w_img) +waypoint2_xform = resample(waypoint2, t1w_img) +waypoint1_data = waypoint1_xform.get_fdata() > 0 +waypoint2_data = waypoint2_xform.get_fdata() > 0 + +surface_color = tab20.colors[0] + +waypoint1_actor = actor.contour_from_roi(waypoint1_data, + color=surface_color, + opacity=0.5) + +waypoint2_actor = actor.contour_from_roi(waypoint2_data, + color=surface_color, + opacity=0.5) + +scene.add(waypoint1_actor) +scene.add(waypoint2_actor) + +window.record(scene, out_path='whole_brain_with_waypoints.png', size=(2400, 2400)) + +bundle_path = op.join(afq_path, + 'bundles') + + 
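#############################################################################
# .. note::
#     As a quick sanity check on the coordinate transforms used above (an
#     illustrative aside, not part of the pipeline itself), we can map a
#     single streamline point from RASMM into the T1w voxel grid by hand;
#     `transform_streamlines` applies exactly this operation to every point:

from nibabel.affines import apply_affine

probe_mm = sft_whole_brain.streamlines[0][0]  # first point, in RASMM (mm)
probe_vox = apply_affine(np.linalg.inv(t1w_img.affine), probe_mm)
# The transformed point should land inside the T1w volume:
assert np.all(probe_vox >= 0) and np.all(probe_vox < np.array(t1w.shape))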
+############################################################################# +# Show the bundle +# --------------- +# The bundle coordinates from pyAFQ are always saved in the reference frame of +# the diffusion data from which they are generated, so we need an image file +# with the dMRI coordinates as a reference for loading the data (we could also +# use `"same"` here). + +fa_img = nib.load(op.join(afq_path, + 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_model-DKI_FA.nii.gz')) +fa = fa_img.get_fdata() +sft_arc = load_trk(op.join(bundle_path, + 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-CSD_desc-prob-afq-ARC_L_tractography.trk'), fa_img) + +sft_arc.to_rasmm() +arc_t1w = transform_streamlines(sft_arc.streamlines, + np.linalg.inv(t1w_img.affine)) + + +bundles = ["ARC_R", "ARC_L", + "ATR_R", "ATR_L", + "CST_R", "CST_L", + "IFO_R", "IFO_L", + "ILF_R", "ILF_L", + "SLF_R", "SLF_L", + "UNC_R", "UNC_L", + "CGC_R", "CGC_L", + "Orbital", "AntFrontal", "SupFrontal", "Motor", + "SupParietal", "PostParietal", "Temporal", "Occipital"] + +color_dict = gen_color_dict(bundles) + +arc_actor = lines_as_tubes(arc_t1w, 8, colors=color_dict['ARC_L']) +scene.clear() + +scene.add(arc_actor) +for slicer in slicers: + scene.add(slicer) + +scene.add(waypoint1_actor) +scene.add(waypoint2_actor) + +window.record(scene, out_path='arc1.png', size=(2400, 2400)) + +############################################################################# +# Clean bundle +# --------------- + +scene.clear() + +scene.add(arc_actor) +for slicer in slicers: + scene.add(slicer) + +window.record(scene, out_path='arc2.png', size=(2400, 2400)) + +clean_bundles_path = op.join(afq_path, + 'clean_bundles') + +sft_arc = load_trk(op.join(clean_bundles_path, + 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-CSD_desc-prob-afq-ARC_L_tractography.trk'), fa_img) + +sft_arc.to_rasmm() +arc_t1w = transform_streamlines(sft_arc.streamlines, + np.linalg.inv(t1w_img.affine)) + + +arc_actor = lines_as_tubes(arc_t1w, 8, colors=tab20.colors[18]) +scene.clear() + +scene.add(arc_actor) +for slicer in slicers: + scene.add(slicer) + +window.record(scene, out_path='arc3.png', size=(2400, 2400)) + +############################################################################# +# Show the values of tissue properties along the bundle +# --------------- + +arc_actor = lines_as_tubes(arc_t1w, 8, + colors=resample(fa_img, t1w_img).get_fdata(), + lookup_colormap=colormap_lookup_table( + scale_range=(0, 1), + hue_range=(0, 1), + saturation_range=(1, 1), + value_range=(0.8, 0.8))) +scene.clear() + +scene.add(arc_actor) +for slicer in slicers: + scene.add(slicer) + +window.record(scene, out_path='arc4.png', size=(2400, 2400)) + +############################################################################# +# Core of the bundle and tract profile +# ------------------------------------- + +core_arc = np.median(np.asarray(set_number_of_points(arc_t1w, 20)), axis=0) + +from dipy.stats.analysis import afq_profile +sft_arc.to_vox() +arc_profile = afq_profile(fa, sft_arc.streamlines, affine=np.eye(4), + n_points=20) + +core_arc_actor = lines_as_tubes( + [core_arc], + 40, + colors=create_colormap(arc_profile, 'viridis') +) + +scene.clear() + +for slicer in slicers: + scene.add(slicer) +scene.add(arc_actor) +scene.add(core_arc_actor) + +window.record(scene, out_path='arc5.png', size=(2400, 2400)) + +scene.clear() + +for slicer in slicers: + scene.add(slicer) + +for bundle in 
bundles: + sft = load_trk(op.join(clean_bundles_path, + f'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-CSD_desc-prob-afq-{bundle}_tractography.trk'), fa_img) + + sft.to_rasmm() + bundle_t1w = transform_streamlines(sft.streamlines, + np.linalg.inv(t1w_img.affine)) + + bundle_actor = lines_as_tubes(bundle_t1w, 8, colors=color_dict[bundle]) + scene.add(bundle_actor) + +window.record(scene, out_path='all_bundles.png', size=(2400, 2400)) + + +scene.clear() + +for slicer in slicers: + scene.add(slicer) + +tract_profiles = [] +for bundle in bundles: + sft = load_trk(op.join(clean_bundles_path, + f'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-CSD_desc-prob-afq-{bundle}_tractography.trk'), fa_img) + sft.to_rasmm() + bundle_t1w = transform_streamlines(sft.streamlines, + np.linalg.inv(t1w_img.affine)) + + core_bundle = np.median(np.asarray(set_number_of_points(bundle_t1w, 20)), axis=0) + sft.to_vox() + tract_profiles.append( + afq_profile(fa, sft.streamlines, affine=np.eye(4), + n_points=20)) + + core_actor = lines_as_tubes( + [core_bundle], + 40, + colors=create_colormap(tract_profiles[-1], 'viridis') + ) + + scene.add(core_actor) + +window.record(scene, out_path='all_tract_profiles.png', size=(2400, 2400)) + +############################################################################# +# Tract profiles as a table + +import matplotlib.pyplot as plt +fig, ax = plt.subplots() +ax.plot(np.asarray(tract_profiles).T) +fig.savefig('tract_profiles_as_table.png') \ No newline at end of file diff --git a/examples/howto_examples/plot_viz.py b/examples/howto_examples/plot_viz.py index 160b688dc..e18dd2870 100644 --- a/examples/howto_examples/plot_viz.py +++ b/examples/howto_examples/plot_viz.py @@ -16,9 +16,11 @@ from dipy.io.streamline import load_trk from dipy.tracking.streamline import transform_streamlines +from dipy.align import resample from fury import actor, window from fury.colormap import create_colormap +from matplotlib.cm import tab20 import AFQ.data.fetch as afd @@ -96,7 +98,9 @@ # .. note:: # A virtual frame buffer is needed if you are running this example on # a machine that is not connected to a display ("headless"). If this is -# the case, you can either +# the case, you can either run this example with the environment variable +# "XVFB" set to "1" or "True" or you can remove the if statement below, +# which will start a virtual frame buffer for you. if os.environ.get("XVFB", False): print("Initializing XVFB") @@ -231,6 +235,7 @@ def slice_volume(data, x=None, y=None, z=None): focal_point=(96.32, 110.34, 84.48), view_up=(-0.33, -0.12, 0.94)) +scene.background((1, 1, 1)) ############################################################################# # Record the visualization # ------------------------- @@ -242,6 +247,7 @@ def slice_volume(data, x=None, y=None, z=None): window.record(scene, out_path='arc_cst1.png', size=(2400, 2400)) + ############################################################################ # Setting bundle colors # --------------------- @@ -256,7 +262,6 @@ def slice_volume(data, x=None, y=None, z=None): # the scene before adding these new actors and adding back the slice actors. We # then call `record` to create this new figure. 
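#
# As an illustrative aside (not part of the example itself): one quick way to
# choose palette entries such as ``tab20.colors[18]`` and ``tab20.colors[2]``
# below is to render the full tab20 palette once and pick the indices by eye::
#
#     import matplotlib.pyplot as plt
#     from matplotlib.cm import tab20
#     plt.imshow([list(tab20.colors)], aspect='auto')
#     plt.savefig('tab20_palette.png')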
-from matplotlib.cm import tab20 color_arc = tab20.colors[18] color_cst = tab20.colors[2] @@ -364,7 +369,6 @@ def slice_volume(data, x=None, y=None, z=None): # be visualized provided an array with a binary representation of the volume # enclosed in this boundary -from dipy.align import resample waypoint1 = nib.load( op.join( @@ -374,7 +378,7 @@ def slice_volume(data, x=None, y=None, z=None): waypoint2 = nib.load( op.join( afq_path, - "ROIs", "sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_desc-ROI-ARC_L-1-include.nii.gz")) + "ROIs", "sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_desc-ROI-ARC_L-2-include.nii.gz")) waypoint1_xform = resample(waypoint1, t1w_img) waypoint2_xform = resample(waypoint2, t1w_img) From 4ea97d36242868f8349deeac4859ea6ec3dc0a6d Mon Sep 17 00:00:00 2001 From: Ariel Rokem Date: Sat, 9 Sep 2023 10:41:46 -0700 Subject: [PATCH 02/12] Reorganize the bundle order. --- .../plot_stages_of_tractometry.py | 39 +++++++++++++------ 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/examples/howto_examples/plot_stages_of_tractometry.py b/examples/howto_examples/plot_stages_of_tractometry.py index e1ca6ed17..ce204cb15 100644 --- a/examples/howto_examples/plot_stages_of_tractometry.py +++ b/examples/howto_examples/plot_stages_of_tractometry.py @@ -288,16 +288,26 @@ def lines_as_tubes(sl, line_width, **kwargs): np.linalg.inv(t1w_img.affine)) -bundles = ["ARC_R", "ARC_L", - "ATR_R", "ATR_L", - "CST_R", "CST_L", - "IFO_R", "IFO_L", - "ILF_R", "ILF_L", - "SLF_R", "SLF_L", - "UNC_R", "UNC_L", - "CGC_R", "CGC_L", - "Orbital", "AntFrontal", "SupFrontal", "Motor", - "SupParietal", "PostParietal", "Temporal", "Occipital"] +bundles = [ + "ARC_R", + "ATR_R", + "CST_R", + "IFO_R", + "ILF_R", + "SLF_R", + "UNC_R", + "CGC_R", + "Orbital", "AntFrontal", "SupFrontal", "Motor", + "SupParietal", "PostParietal", "Temporal", "Occipital", + "CGC_L", + "UNC_L", + "SLF_L", + "ILF_L", + "IFO_L", + "CST_L", + "ATR_L", + "ARC_L", + ] color_dict = gen_color_dict(bundles) @@ -443,5 +453,12 @@ def lines_as_tubes(sl, line_width, **kwargs): import matplotlib.pyplot as plt fig, ax = plt.subplots() -ax.plot(np.asarray(tract_profiles).T) +for ii, bundle in enumerate(bundles): + ax.plot(np.arange(ii * 20, (ii + 1) * 20), + tract_profiles[ii], + color=color_dict[bundle], + linewidth=3) +ax.set_xticks(np.arange(0, 20 * len(bundles), 20)) +ax.set_xticklabels(bundles, rotation=45, ha='right') +fig.set_size_inches(10, 4) fig.savefig('tract_profiles_as_table.png') \ No newline at end of file From 8cbdecb54a07eb2c1623487376adf56916690020 Mon Sep 17 00:00:00 2001 From: Ariel Rokem Date: Mon, 11 Sep 2023 14:38:46 -0700 Subject: [PATCH 03/12] Reorganize the front page. --- docs/source/index.rst | 44 ++++++++++++++++++------------------------- 1 file changed, 18 insertions(+), 26 deletions(-) diff --git a/docs/source/index.rst b/docs/source/index.rst index 9a9053003..fdb57169c 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -7,7 +7,7 @@ Automated Fiber Quantification in Python (pyAFQ) pyAFQ is an open-source software tool for the analysis of brain white matter in diffusion MRI measurements. It implements a complete and automated data processing pipeline for tractometry, from raw DTI data to white matter tract identification -, as well as quantification of tissue properties along the length of the +, as well as quantification of tissue properties along the length of the major long-range brain white matter connections. 
@@ -22,9 +22,9 @@ major long-range brain white matter connections.
 .. note::
 
     Tractography is concerned with the visualization and mapping of white matter
-    tracts in the brain, while tractometry involves the quantitative analysis of 
-    the structural properties of these tracts. Both techniques are valuable in 
-    understanding the brain's connectivity and can provide insights into various 
+    tracts in the brain, while tractometry involves the quantitative analysis of
+    the structural properties of these tracts. Both techniques are valuable in
+    understanding the brain's connectivity and can provide insights into various
     neurological conditions and cognitive processes. More information can be
     found in the Explanations page.
 
@@ -41,29 +41,28 @@ major long-range brain white matter connections.
 .. grid:: 2
 
     .. grid-item-card::
-        :link: howto/index.html
+        :link: tutorials/index.html
 
-        :octicon:`rocket;3em;sd-text-center`
+        :octicon:`book;3em;sd-text-center`
 
-        How To
-        ^^^^^^
+        Tutorials
+        ^^^^^^^^^
 
-        User's guide to pyAFQ. This guide assumes you know
-        the basics and walks through more commonly used examples.
+        Beginner's guide to pyAFQ. This guide introduces pyAFQ's
+        basic concepts and walks through fundamentals of using the software.
 
         +++
 
-
     .. grid-item-card::
-        :link: tutorials/index.html
+        :link: howto/index.html
 
-        :octicon:`book;3em;sd-text-center`
+        :octicon:`rocket;3em;sd-text-center`
 
-        Tutorials
-        ^^^^^^^^^
+        How To
+        ^^^^^^
 
-        Beginner's guide to pyAFQ. This guide introduces pyAFQ'S
-        basic concepts and walks through fundamentals of using the software.
+        A set of recipes for specific use cases. This guide assumes you know
+        the basics and walks through more commonly used examples.
 
         +++
 
     .. grid-item-card::
@@ -76,10 +75,8 @@ major long-range brain white matter connections.
         Explanations
         ^^^^^^^^^^^^
 
-        For more experienced users. This guide contains in depth
-        explanations on how to use pyAFQ methods. It includes how to
-        create detailed visualizations and analyses.
-
+        More detailed and conceptual, this guide contains in-depth
+        explanations of how to think about the various pyAFQ methods.
         +++
 
     .. grid-item-card::
@@ -116,8 +113,3 @@ R01EB027585) to Eleftherios Garyfallidis and to Ariel Rokem , and by `NSF grant
     :align: center
     :figclass: align-center
     :target: http://brainandeducation.com
-
-
-
-
-

From b0e687d116668bfbb6b72a69788221516de51ec4 Mon Sep 17 00:00:00 2001
From: Ariel Rokem
Date: Mon, 11 Sep 2023 14:41:03 -0700
Subject: [PATCH 04/12] Build tutorials directly into the higher level.

This is so that when we click over from the front page, we directly land in
the page that has all the thumbnails with the tutorial titles.
--- docs/source/conf.py | 1 + docs/source/tutorials/index.rst | 7 ------- 2 files changed, 1 insertion(+), 7 deletions(-) delete mode 100644 docs/source/tutorials/index.rst diff --git a/docs/source/conf.py b/docs/source/conf.py index b0ef2734a..5f49df01f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -227,6 +227,7 @@ '../../examples/tutorial_examples'], # path where to save gallery generated examples 'gallery_dirs': ['howto/howto_examples', 'tutorials/tutorial_examples'], + 'ignore_pattern': 'plot_baby_afq.py|cloudknot_hcp_example.py|cloudknot_example.py|add_custom_bundle.py', # noqa 'image_scrapers': image_scrapers, 'reset_modules': (reset_progressbars), 'show_memory': True, diff --git a/docs/source/tutorials/index.rst b/docs/source/tutorials/index.rst deleted file mode 100644 index 9f12fd6ef..000000000 --- a/docs/source/tutorials/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Tutorials ---------- - -.. toctree:: - :maxdepth: 2 - - tutorial_examples/index.rst \ No newline at end of file From 5b6dfe2ad204361b6ba4db13adec27746c601a72 Mon Sep 17 00:00:00 2001 From: Ariel Rokem Date: Mon, 11 Sep 2023 15:08:39 -0700 Subject: [PATCH 05/12] Adding more content to first tutorial. Also, reordering the tutorials on the page in a somewhat more deliberate fashion. We'll need to follow up and change the titles for all of these tutorials. --- docs/source/conf.py | 3 + .../use_subject_space_rois_from_freesurfer.py | 0 examples/tutorial_examples/README.txt | 11 +- examples/tutorial_examples/plot_afq_api.py | 129 +++++++++++------- 4 files changed, 92 insertions(+), 51 deletions(-) rename examples/{tutorial_examples => howto_examples}/use_subject_space_rois_from_freesurfer.py (100%) diff --git a/docs/source/conf.py b/docs/source/conf.py index 5f49df01f..c7d590500 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -221,12 +221,15 @@ from _progressbars import reset_progressbars # noqa +from sphinx_gallery.sorting import ExampleTitleSortKey + sphinx_gallery_conf = { # path to your examples scripts 'examples_dirs': ['../../examples/howto_examples', '../../examples/tutorial_examples'], # path where to save gallery generated examples 'gallery_dirs': ['howto/howto_examples', 'tutorials/tutorial_examples'], + 'within_subsection_order': ExampleTitleSortKey, 'ignore_pattern': 'plot_baby_afq.py|cloudknot_hcp_example.py|cloudknot_example.py|add_custom_bundle.py', # noqa 'image_scrapers': image_scrapers, 'reset_modules': (reset_progressbars), diff --git a/examples/tutorial_examples/use_subject_space_rois_from_freesurfer.py b/examples/howto_examples/use_subject_space_rois_from_freesurfer.py similarity index 100% rename from examples/tutorial_examples/use_subject_space_rois_from_freesurfer.py rename to examples/howto_examples/use_subject_space_rois_from_freesurfer.py diff --git a/examples/tutorial_examples/README.txt b/examples/tutorial_examples/README.txt index 826425aab..aacbc1672 100644 --- a/examples/tutorial_examples/README.txt +++ b/examples/tutorial_examples/README.txt @@ -1,6 +1,11 @@ .. _tutorial_examples: -Examples -======== +Tutorials +========= -Tutorial examples. \ No newline at end of file +Each of the following tutorials includes code and images to help walk you +through usage of pyAFQ. If you are new to the software and want to learn +about its usage, walk through each of these tutorials. In many cases, +the code has already been run and you can see the outputs. 
You can also +download a `.py` script or a `.ipynb` Jupyter notebook file that you can run +on your own computer, after `downloading and installing the pyAFQ software `_. \ No newline at end of file diff --git a/examples/tutorial_examples/plot_afq_api.py b/examples/tutorial_examples/plot_afq_api.py index dedf0dc14..f8d4ec8f5 100644 --- a/examples/tutorial_examples/plot_afq_api.py +++ b/examples/tutorial_examples/plot_afq_api.py @@ -1,59 +1,91 @@ """ -========================== -AFQ API -========================== +========================================================= +1 Using the pyAFQ Application Programming Interface (API) +========================================================= -An example using the AFQ API +In this first tutorial, we will introduce the use of the pyAFQ Application +Programming Interface (API). This API is designed to be flexible and +extensible, and allows users to customize the pipeline to their needs. +The pipeline executes the following steps: +1. Tractography +2. Registration to a template. +3. Bundle segmentation + cleaning +4. Tissue property modeling +5. Tract profile calculation +6. Bundle visualization + + +We will not go into these in a lot more detail here, but you can read more +about them in other examples and tutorials. """ -import os.path as op +########################################################################## +# Importing libraries that we will use +# ------------------------------------ +# The tutorial will show-case some of the functionality and outputs of +# pyAFQ. We will use the following libraries in this tutorial: +# +# - `AFQ.api.group.GroupAFQ` to run the pipeline. As you will see below, this +# object is the workhorse of pyAFQ, and used to define the pipeline and +# execute it. +# - `AFQ.data.fetch` is pyAFQ's data management module. Here, we use it to +# download example data and to locate the data in the user's home directory. +# - `os.path` is used to specify file paths +# - `matplotlib.pyplot` used to visualize the results with 2D plots and +# figures. +# - `nibabel` is used to load resulting data derivatives. +# - `plotly` is used to visualize the results with 3D web-based visualizations. +# - `pandas` is used to read the results into a table. +# +# +from AFQ.api.group import GroupAFQ +import AFQ.data.fetch as afd + +import os.path as op import matplotlib.pyplot as plt import nibabel as nib import plotly import pandas as pd -from AFQ.api.group import GroupAFQ -import AFQ.data.fetch as afd ########################################################################## -# Get some example data -# --------------------- -# -# Retrieves High angular resolution diffusion imaging (HARDI) dataset from -# Stanford's Vista Lab -# -# see https://purl.stanford.edu/ng782rw8378 for details on dataset. -# -# The data for the first subject and first session are downloaded locally -# (by default into the users home directory) under: -# -# ``.dipy/stanford_hardi/`` -# -# Anatomical data (``anat``) and Diffusion-weighted imaging data (``dwi``) are -# then extracted, formatted to be BIDS compliant, and placed in the AFQ -# data directory (by default in the users home directory) under: +# Download example data and organize it +# ------------------------------------- +# Before we can start running the software we need to download some data. +# We will use an example dataset that contains high angular resolution +# diffusion MR imaging (HARDI) from one subject. 
This data was acquired in
# the Stanford Center for Cognitive and Neurobiological Imaging (CNI). You can
# learn more about this data in [Rokem2015]_.
#
# The following code downloads the data and organizes it in a BIDS-compliant
# format, and places it in the `AFQ_data` directory, which is typically
# located in your home directory under:
#
# ``~/AFQ_data/stanford_hardi/``
#
# The `clear_previous_afq` argument is used to remove the outputs from previous
# runs of pyAFQ that may be stored in the AFQ_data/stanford_hardi/ BIDS
# directory. Set it to None if you want to use all the results of previous
# runs of pyAFQ. Here, we set it to clear all of the outputs past the
# tractography stage (which tends to be time-consuming).

afd.organize_stanford_data(clear_previous_afq="track")

##########################################################################
# Set tractography parameters
# ---------------------------
# The pyAFQ API allows us to define the parameters used for tractography.
# If you do not set these parameters, a reasonable set of defaults is used:
# probabilistic tractography using constrained spherical deconvolution with
# one seed in every white-matter voxel. Here, we create an alternative
# setting, which uses CSD-based probabilistic tractography, but with only
# 25,000 seeds randomly distributed in the white matter. This is a much
# smaller number of seeds than the default, and will result in a much faster
# run-time. However, it will also result in less accurate tractography, and
# may result in missing some of the smaller tracts. In this case, we only do
# this to make this example faster and consume less space.

tracking_params = dict(n_seeds=25000,
                       random_seeds=True,
@@ -64,18 +96,9 @@
# Initialize a GroupAFQ object:
# -------------------------
#
-# Creates a GroupAFQ object, that encapsulates tractometry. This object can be
-# used to manage the entire AFQ pipeline, including:
-#
-# - Tractography
-# - Registration
-# - Segmentation
-# - Cleaning
-# - Profiling
-# - Visualization
-#
-# In this example we will load the subjects session data from the previous step
-# using the default AFQ parameters.
+# The following code creates the GroupAFQ object, which manages all of the
+# data transformations and computations conducted by the software, based on
+# its initial configuration, which we set up below.
#
# .. note::
#
@@ -212,3 +235,13 @@
    raise ValueError((
        "Small number of streamlines found "
        f"for bundle(s):\n{bundle_counts}"))
+
+
+##########################################################################
+# References
+# ----------
+#
+# .. [Rokem2015] Ariel Rokem, Jason D Yeatman, Franco Pestilli, Kendrick
+#    N Kay, Aviv Mezer, Stefan van der Walt, Brian A Wandell. Evaluating the
+#    accuracy of diffusion MRI models in white matter. PLoS One, 10(4),
+#    e0123272, 2015.
\ No newline at end of file From fa64cde8315bad1a9ea5ce1ea3eec6244eaa7d66 Mon Sep 17 00:00:00 2001 From: Ariel Rokem Date: Tue, 12 Sep 2023 14:07:50 -0700 Subject: [PATCH 06/12] Add the custom scalars page back into an index for usage. --- docs/source/howto/index.rst | 1 + examples/tutorial_examples/README.txt | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/source/howto/index.rst b/docs/source/howto/index.rst index eb18911cc..db64df058 100644 --- a/docs/source/howto/index.rst +++ b/docs/source/howto/index.rst @@ -13,5 +13,6 @@ software. developing/index getting_help cite + scalars howto_examples/index diff --git a/examples/tutorial_examples/README.txt b/examples/tutorial_examples/README.txt index aacbc1672..6c8cc72eb 100644 --- a/examples/tutorial_examples/README.txt +++ b/examples/tutorial_examples/README.txt @@ -8,4 +8,6 @@ through usage of pyAFQ. If you are new to the software and want to learn about its usage, walk through each of these tutorials. In many cases, the code has already been run and you can see the outputs. You can also download a `.py` script or a `.ipynb` Jupyter notebook file that you can run -on your own computer, after `downloading and installing the pyAFQ software `_. \ No newline at end of file +on your own computer, after +`downloading and installing the pyAFQ software `_. + From eb64f97e095fa17c7a250267a910de7e0ce9d70a Mon Sep 17 00:00:00 2001 From: Ariel Rokem Date: Fri, 15 Sep 2023 16:29:33 -0700 Subject: [PATCH 07/12] Adds code to make videos. --- .../plot_stages_of_tractometry.py | 111 +++++++++++++----- 1 file changed, 82 insertions(+), 29 deletions(-) diff --git a/examples/howto_examples/plot_stages_of_tractometry.py b/examples/howto_examples/plot_stages_of_tractometry.py index ce204cb15..497984515 100644 --- a/examples/howto_examples/plot_stages_of_tractometry.py +++ b/examples/howto_examples/plot_stages_of_tractometry.py @@ -32,6 +32,23 @@ import AFQ.data.fetch as afd from AFQ.viz.utils import gen_color_dict + +from PIL import Image +def make_video(frames, out): + video = [] + for nn in frames: + frame = Image.open(nn) + video.append(frame) + + # Save the frames as an animated GIF + video[0].save( + out, + save_all=True, + append_images=video[1:], + duration=300, + loop=1) + + ############################################################################# # # .. 
note:: @@ -138,23 +155,23 @@ def slice_volume(data, x=None, y=None, z=None): return slicer_actors -slicers_b0 = slice_volume(dmri_b0, x=dmri_b0.shape[0] // 2, z=dmri_b0.shape[-1] // 3) -slicers_b1000 = slice_volume(dmri_b1000, x=dmri_b0.shape[0] // 2, z=dmri_b0.shape[-1] // 3) -slicers_b2000 = slice_volume(dmri_b2000, x=dmri_b0.shape[0] // 2, z=dmri_b0.shape[-1] // 3) +slicers_b0 = slice_volume(dmri_b0, x=dmri_b0.shape[0] // 2, y=dmri_b0.shape[1] // 2, z=dmri_b0.shape[-1] // 3) +slicers_b1000 = slice_volume(dmri_b1000, x=dmri_b0.shape[0] // 2, y=dmri_b0.shape[1] // 2, z=dmri_b0.shape[-1] // 3) +slicers_b2000 = slice_volume(dmri_b2000, x=dmri_b0.shape[0] // 2, y=dmri_b0.shape[1] // 2, z=dmri_b0.shape[-1] // 3) for bval, slicers in zip([0, 1000, 2000], [slicers_b0, slicers_b1000, slicers_b2000]): scene = window.Scene() for slicer in slicers: scene.add(slicer) - scene.set_camera( - position=(238.04, 174.48, 143.04), - focal_point=(96.32, 110.34, 84.48), - view_up=(-0.33, -0.12, 0.94)) + scene.set_camera(position=(721.34, 393.48, 97.03), + focal_point=(96.00, 114.00, 96.00), + view_up=(-0.01, 0.02, 1.00)) scene.background((1, 1, 1)) - window.record(scene, out_path=f'b{bval}.png', size=(2400, 2400)) - + window.record(scene, out_path=f'b{bval}', size=(2400, 2400), + n_frames=36, path_numbering=True) + make_video([f'b{bval}{ii:06d}.png' for ii in range(36)], f'b{bval}.gif') ############################################################################# # Visualizing whole-brain tractography # ------------------------------------ @@ -204,7 +221,7 @@ def lines_as_tubes(sl, line_width, **kwargs): whole_brain_actor = lines_as_tubes(whole_brain_t1w, 2) -slicers = slice_volume(t1w, x=t1w.shape[0] // 2, z=t1w.shape[-1] // 3) +slicers = slice_volume(t1w, x=t1w.shape[0] // 2, y=t1w.shape[1] // 2) scene = window.Scene() @@ -212,19 +229,21 @@ def lines_as_tubes(sl, line_width, **kwargs): for slicer in slicers: scene.add(slicer) -scene.set_camera(position=(238.04, 174.48, 143.04), - focal_point=(96.32, 110.34, 84.48), - view_up=(-0.33, -0.12, 0.94)) +scene.set_camera(position=(721.34, 393.48, 97.03), + focal_point=(96.00, 114.00, 96.00), + view_up=(-0.01, 0.02, 1.00)) scene.background((1, 1, 1)) -window.record(scene, out_path='whole_brain.png', size=(2400, 2400)) +window.record(scene, out_path='whole_brain', size=(2400, 2400), + n_frames=36, path_numbering=True) + +make_video([f"whole_brain{ii:06d}.png" for ii in range(36)], "whole_brain.gif") ############################################################################# # Whole brain with waypoints # -------------------------------------- # - scene.clear() whole_brain_actor = lines_as_tubes(whole_brain_t1w, 2) slicers = slice_volume(t1w, x=t1w.shape[0] // 2, z=t1w.shape[-1] // 3) @@ -263,7 +282,11 @@ def lines_as_tubes(sl, line_width, **kwargs): scene.add(waypoint1_actor) scene.add(waypoint2_actor) -window.record(scene, out_path='whole_brain_with_waypoints.png', size=(2400, 2400)) +window.record(scene, out_path='whole_brain_with_waypoints', size=(2400, 2400), + n_frames=36, path_numbering=True) + +make_video([f"whole_brain_with_waypoints{ii:06d}.png" for ii in range(36)], + "whole_brain_with_waypoints.gif") bundle_path = op.join(afq_path, 'bundles') @@ -321,7 +344,10 @@ def lines_as_tubes(sl, line_width, **kwargs): scene.add(waypoint1_actor) scene.add(waypoint2_actor) -window.record(scene, out_path='arc1.png', size=(2400, 2400)) +window.record(scene, out_path='arc1', size=(2400, 2400), + n_frames=36, path_numbering=True) + +make_video([f"arc1{ii:06d}.png" for 
ii in range(36)], "arc1.gif") ############################################################################# # Clean bundle @@ -333,7 +359,10 @@ def lines_as_tubes(sl, line_width, **kwargs): for slicer in slicers: scene.add(slicer) -window.record(scene, out_path='arc2.png', size=(2400, 2400)) +window.record(scene, out_path='arc2', size=(2400, 2400), + n_frames=36, path_numbering=True) + +make_video([f"arc2{ii:06d}.png" for ii in range(36)], "arc2.gif") clean_bundles_path = op.join(afq_path, 'clean_bundles') @@ -353,26 +382,33 @@ def lines_as_tubes(sl, line_width, **kwargs): for slicer in slicers: scene.add(slicer) -window.record(scene, out_path='arc3.png', size=(2400, 2400)) +window.record(scene, out_path='arc3', size=(2400, 2400), + n_frames=36, path_numbering=True) + +make_video([f"arc3{ii:06d}.png" for ii in range(36)], "arc3.gif") ############################################################################# # Show the values of tissue properties along the bundle # --------------- +lut_args = dict(scale_range=(0, 1), + hue_range=(1, 0), + saturation_range=(0, 1), + value_range=(0, 1)) + arc_actor = lines_as_tubes(arc_t1w, 8, colors=resample(fa_img, t1w_img).get_fdata(), - lookup_colormap=colormap_lookup_table( - scale_range=(0, 1), - hue_range=(0, 1), - saturation_range=(1, 1), - value_range=(0.8, 0.8))) + lookup_colormap=colormap_lookup_table(**lut_args)) scene.clear() scene.add(arc_actor) for slicer in slicers: scene.add(slicer) -window.record(scene, out_path='arc4.png', size=(2400, 2400)) +window.record(scene, out_path='arc4', size=(2400, 2400), + n_frames=36, path_numbering=True) + +make_video([f"arc4{ii:06d}.png" for ii in range(36)], "arc4.gif") ############################################################################# # Core of the bundle and tract profile @@ -391,6 +427,10 @@ def lines_as_tubes(sl, line_width, **kwargs): colors=create_colormap(arc_profile, 'viridis') ) +arc_actor = lines_as_tubes(arc_t1w, 1, + colors=resample(fa_img, t1w_img).get_fdata(), + lookup_colormap=colormap_lookup_table(**lut_args)) + scene.clear() for slicer in slicers: @@ -398,7 +438,10 @@ def lines_as_tubes(sl, line_width, **kwargs): scene.add(arc_actor) scene.add(core_arc_actor) -window.record(scene, out_path='arc5.png', size=(2400, 2400)) +window.record(scene, out_path='arc5', size=(2400, 2400), + n_frames=36, path_numbering=True) + +make_video([f"arc5{ii:06d}.png" for ii in range(36)], "arc5.gif") scene.clear() @@ -411,12 +454,15 @@ def lines_as_tubes(sl, line_width, **kwargs): sft.to_rasmm() bundle_t1w = transform_streamlines(sft.streamlines, - np.linalg.inv(t1w_img.affine)) + np.linalg.inv(t1w_img.affine)) bundle_actor = lines_as_tubes(bundle_t1w, 8, colors=color_dict[bundle]) scene.add(bundle_actor) -window.record(scene, out_path='all_bundles.png', size=(2400, 2400)) +window.record(scene, out_path='all_bundles', size=(2400, 2400), + n_frames=36, path_numbering=True) + +make_video([f"all_bundles{ii:06d}.png" for ii in range(36)], "all_bundles.gif") scene.clear() @@ -446,7 +492,14 @@ def lines_as_tubes(sl, line_width, **kwargs): scene.add(core_actor) -window.record(scene, out_path='all_tract_profiles.png', size=(2400, 2400)) +window.record(scene, + out_path='all_tract_profiles', + size=(2400, 2400), + n_frames=36, + path_numbering=True) + +make_video([f"all_tract_profiles{ii:06d}.png" for ii in range(36)], + "all_tract_profiles.gif") ############################################################################# # Tract profiles as a table From 93817cf92a0060cf195d2a8c096ebe4c103ece4f Mon Sep 17 
00:00:00 2001 From: Ariel Rokem Date: Sat, 16 Sep 2023 10:34:03 -0700 Subject: [PATCH 08/12] Increase the number of frames. Save frames to temp location. --- .../plot_stages_of_tractometry.py | 80 ++++++++++--------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/examples/howto_examples/plot_stages_of_tractometry.py b/examples/howto_examples/plot_stages_of_tractometry.py index 497984515..d84ead453 100644 --- a/examples/howto_examples/plot_stages_of_tractometry.py +++ b/examples/howto_examples/plot_stages_of_tractometry.py @@ -18,6 +18,7 @@ import os.path as op import nibabel as nib import numpy as np +import tempfile from dipy.io.streamline import load_trk from dipy.tracking.streamline import transform_streamlines, set_number_of_points @@ -32,8 +33,9 @@ import AFQ.data.fetch as afd from AFQ.viz.utils import gen_color_dict - from PIL import Image + + def make_video(frames, out): video = [] for nn in frames: @@ -66,6 +68,10 @@ def make_video(frames, out): vdisplay = Xvfb() vdisplay.start() + +tmp = tempfile.mkdtemp() +n_frames = 72 + ############################################################################### # Get some data from HBN POD2 # ---------------------------- @@ -168,10 +174,11 @@ def slice_volume(data, x=None, y=None, z=None): view_up=(-0.01, 0.02, 1.00)) scene.background((1, 1, 1)) - window.record(scene, out_path=f'b{bval}', size=(2400, 2400), - n_frames=36, path_numbering=True) + window.record(scene, out_path=f'{tmp}/b{bval}', + size=(2400, 2400), + n_frames=n_frames, path_numbering=True) - make_video([f'b{bval}{ii:06d}.png' for ii in range(36)], f'b{bval}.gif') + make_video([f'{tmp}/b{bval}{ii:06d}.png' for ii in range(n_frames)], f'b{bval}.gif') ############################################################################# # Visualizing whole-brain tractography # ------------------------------------ @@ -221,7 +228,7 @@ def lines_as_tubes(sl, line_width, **kwargs): whole_brain_actor = lines_as_tubes(whole_brain_t1w, 2) -slicers = slice_volume(t1w, x=t1w.shape[0] // 2, y=t1w.shape[1] // 2) +slicers = slice_volume(t1w, y=t1w.shape[1] // 2 - 5, z=t1w.shape[-1] // 3) scene = window.Scene() @@ -234,10 +241,11 @@ def lines_as_tubes(sl, line_width, **kwargs): view_up=(-0.01, 0.02, 1.00)) scene.background((1, 1, 1)) -window.record(scene, out_path='whole_brain', size=(2400, 2400), - n_frames=36, path_numbering=True) +window.record(scene, out_path=f'{tmp}/whole_brain', size=(2400, 2400), + n_frames=n_frames, path_numbering=True) -make_video([f"whole_brain{ii:06d}.png" for ii in range(36)], "whole_brain.gif") +make_video([f"{tmp}/whole_brain{ii:06d}.png" for ii in range(n_frames)], + "whole_brain.gif") ############################################################################# # Whole brain with waypoints @@ -246,7 +254,6 @@ def lines_as_tubes(sl, line_width, **kwargs): scene.clear() whole_brain_actor = lines_as_tubes(whole_brain_t1w, 2) -slicers = slice_volume(t1w, x=t1w.shape[0] // 2, z=t1w.shape[-1] // 3) scene.add(whole_brain_actor) for slicer in slicers: @@ -282,10 +289,10 @@ def lines_as_tubes(sl, line_width, **kwargs): scene.add(waypoint1_actor) scene.add(waypoint2_actor) -window.record(scene, out_path='whole_brain_with_waypoints', size=(2400, 2400), - n_frames=36, path_numbering=True) +window.record(scene, out_path=f'{tmp}/whole_brain_with_waypoints', size=(2400, 2400), + n_frames=n_frames, path_numbering=True) -make_video([f"whole_brain_with_waypoints{ii:06d}.png" for ii in range(36)], +make_video([f"{tmp}/whole_brain_with_waypoints{ii:06d}.png" for ii in 
range(n_frames)], "whole_brain_with_waypoints.gif") bundle_path = op.join(afq_path, @@ -344,10 +351,10 @@ def lines_as_tubes(sl, line_width, **kwargs): scene.add(waypoint1_actor) scene.add(waypoint2_actor) -window.record(scene, out_path='arc1', size=(2400, 2400), - n_frames=36, path_numbering=True) +window.record(scene, out_path=f'{tmp}/arc1', size=(2400, 2400), + n_frames=n_frames, path_numbering=True) -make_video([f"arc1{ii:06d}.png" for ii in range(36)], "arc1.gif") +make_video([f"{tmp}/arc1{ii:06d}.png" for ii in range(n_frames)], "arc1.gif") ############################################################################# # Clean bundle @@ -359,10 +366,10 @@ def lines_as_tubes(sl, line_width, **kwargs): for slicer in slicers: scene.add(slicer) -window.record(scene, out_path='arc2', size=(2400, 2400), - n_frames=36, path_numbering=True) +window.record(scene, out_path=f'{tmp}/arc2', size=(2400, 2400), + n_frames=n_frames, path_numbering=True) -make_video([f"arc2{ii:06d}.png" for ii in range(36)], "arc2.gif") +make_video([f"arc2{ii:06d}.png" for ii in range(n_frames)], "arc2.gif") clean_bundles_path = op.join(afq_path, 'clean_bundles') @@ -382,10 +389,10 @@ def lines_as_tubes(sl, line_width, **kwargs): for slicer in slicers: scene.add(slicer) -window.record(scene, out_path='arc3', size=(2400, 2400), - n_frames=36, path_numbering=True) +window.record(scene, out_path=f'{tmp}/arc3', size=(2400, 2400), + n_frames=n_frames, path_numbering=True) -make_video([f"arc3{ii:06d}.png" for ii in range(36)], "arc3.gif") +make_video([f"{tmp}/arc3{ii:06d}.png" for ii in range(n_frames)], "arc3.gif") ############################################################################# # Show the values of tissue properties along the bundle @@ -405,10 +412,10 @@ def lines_as_tubes(sl, line_width, **kwargs): for slicer in slicers: scene.add(slicer) -window.record(scene, out_path='arc4', size=(2400, 2400), - n_frames=36, path_numbering=True) +window.record(scene, out_path=f'{tmp}/arc4', size=(2400, 2400), + n_frames=n_frames, path_numbering=True) -make_video([f"arc4{ii:06d}.png" for ii in range(36)], "arc4.gif") +make_video([f"{tmp}/arc4{ii:06d}.png" for ii in range(n_frames)], "arc4.gif") ############################################################################# # Core of the bundle and tract profile @@ -438,10 +445,10 @@ def lines_as_tubes(sl, line_width, **kwargs): scene.add(arc_actor) scene.add(core_arc_actor) -window.record(scene, out_path='arc5', size=(2400, 2400), - n_frames=36, path_numbering=True) +window.record(scene, out_path=f'{tmp}/arc5', size=(2400, 2400), + n_frames=n_frames, path_numbering=True) -make_video([f"arc5{ii:06d}.png" for ii in range(36)], "arc5.gif") +make_video([f"{tmp}/arc5{ii:06d}.png" for ii in range(n_frames)], "arc5.gif") scene.clear() @@ -459,10 +466,10 @@ def lines_as_tubes(sl, line_width, **kwargs): bundle_actor = lines_as_tubes(bundle_t1w, 8, colors=color_dict[bundle]) scene.add(bundle_actor) -window.record(scene, out_path='all_bundles', size=(2400, 2400), - n_frames=36, path_numbering=True) +window.record(scene, out_path=f'{tmp}/all_bundles', size=(2400, 2400), + n_frames=n_frames, path_numbering=True) -make_video([f"all_bundles{ii:06d}.png" for ii in range(36)], "all_bundles.gif") +make_video([f"{tmp}/all_bundles{ii:06d}.png" for ii in range(n_frames)], "all_bundles.gif") scene.clear() @@ -493,13 +500,13 @@ def lines_as_tubes(sl, line_width, **kwargs): scene.add(core_actor) window.record(scene, - out_path='all_tract_profiles', + out_path=f'{tmp}/all_tract_profiles', size=(2400, 
2400),
-                  n_frames=36,
+                  n_frames=n_frames,
                   path_numbering=True)

-make_video([f"all_tract_profiles{ii:06d}.png" for ii in range(36)],
-           "all_tract_profiles.gif")
+make_video([f"{tmp}/all_tract_profiles{ii:06d}.png" for ii in range(n_frames)],
+           "all_tract_profiles.gif")

#############################################################################
# Tract profiles as a table

import matplotlib.pyplot as plt
fig, ax = plt.subplots()
for ii, bundle in enumerate(bundles):
    ax.plot(np.arange(ii * 20, (ii + 1) * 20),
            tract_profiles[ii],
            color=color_dict[bundle],
            linewidth=3)
ax.set_xticks(np.arange(0, 20 * len(bundles), 20))
ax.set_xticklabels(bundles, rotation=45, ha='right')
-fig.set_size_inches(10, 4)
-fig.savefig('tract_profiles_as_table.png')
\ No newline at end of file
+fig.set_size_inches(10, 5)
+fig.savefig('tract_profiles_as_table.png')
+plt.tight_layout()
\ No newline at end of file

From ec8086cff113cf82069ad6f3436361401518bf8d Mon Sep 17 00:00:00 2001
From: Ariel Rokem
Date: Sat, 16 Sep 2023 10:47:23 -0700
Subject: [PATCH 09/12] Rewrite to focus on videos.

---
 .../plot_stages_of_tractometry.py             | 55 +++++++++++++++----
 1 file changed, 44 insertions(+), 11 deletions(-)

diff --git a/examples/howto_examples/plot_stages_of_tractometry.py b/examples/howto_examples/plot_stages_of_tractometry.py
index d84ead453..91639bb96 100644
--- a/examples/howto_examples/plot_stages_of_tractometry.py
+++ b/examples/howto_examples/plot_stages_of_tractometry.py
@@ -1,18 +1,25 @@
 """
-============================
-The stages of tractometry
-============================
-
-This example visualizes the different stages of tractometry, from the
-preprocessed diffusion data to the final tract profiles. We will use the Fury
-library to visualize the results of pyAFQ in high-quality, publication-ready
-figures.
+=====================================================
+Making videos of the different stages of tractometry
+=====================================================
+
+Two-dimensional figures of anatomical data are somewhat limited, because of
+the complex three-dimensional configuration of the brain. Therefore, dynamic
+videos of anatomical data are useful for exploring the data, as well as for
+creating dynamic presentations of the results. This example visualizes various
+stages of tractometry, from preprocessed diffusion data to the final tract
+profiles. We will use the Fury software library to render individual frames of
+the results of each stage, and then create videos from these frames using the
+Python Image Library (PIL, also known as Pillow).

 """

##############################################################################
# Imports
# -------
#

import os
import os.path as op

from PIL import Image

##############################################################################
# Define a function that makes videos
# -----------------------------------
# PIL can save a series of images as an animated GIF; we wrap this capability
# in a small helper function that we will use to create the videos.
#
# .. note::
#     This function is not part of the AFQ library, but is included here for
#     convenience. It is not necessary to understand this function in order to
#     understand the rest of the example. If you are interested in learning
#     more about this function, you can read the PIL documentation. The
#     function is based on the ``PIL.Image.save`` method.

 def make_video(frames, out):
+    """
+    Make a video from a series of frames.
+ + Parameters + ---------- + frames : list of str + A list of file names of the frames to be included in the video. + + out : str + The name of the output file. Format is determined by the file + extension. + """ video = [] for nn in frames: frame = Image.open(nn) @@ -369,7 +402,7 @@ def lines_as_tubes(sl, line_width, **kwargs): window.record(scene, out_path=f'{tmp}/arc2', size=(2400, 2400), n_frames=n_frames, path_numbering=True) -make_video([f"arc2{ii:06d}.png" for ii in range(n_frames)], "arc2.gif") +make_video([f"{tmp}/arc2{ii:06d}.png" for ii in range(n_frames)], "arc2.gif") clean_bundles_path = op.join(afq_path, 'clean_bundles') From ae4f8786731af1ce2f46a4d1f026dca007b9d2f7 Mon Sep 17 00:00:00 2001 From: Ariel Rokem Date: Sat, 16 Sep 2023 16:24:43 -0700 Subject: [PATCH 10/12] Fix this filename. --- examples/howto_examples/plot_stages_of_tractometry.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/howto_examples/plot_stages_of_tractometry.py b/examples/howto_examples/plot_stages_of_tractometry.py index 91639bb96..728edc7db 100644 --- a/examples/howto_examples/plot_stages_of_tractometry.py +++ b/examples/howto_examples/plot_stages_of_tractometry.py @@ -28,7 +28,8 @@ import tempfile from dipy.io.streamline import load_trk -from dipy.tracking.streamline import transform_streamlines, set_number_of_points +from dipy.tracking.streamline import (transform_streamlines, + set_number_of_points) from dipy.core.gradients import gradient_table from dipy.align import resample @@ -230,7 +231,7 @@ def slice_volume(data, x=None, y=None, z=None): sft_whole_brain = load_trk(op.join(afq_path, - 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-probCSD_tractography.trk'), fa_img) + 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-CSD_desc-prob-afq_tractography.trk'), fa_img) ############################################################################# From 10c4a64ee7c5bdfa4676a482c9cd5704a26c46c0 Mon Sep 17 00:00:00 2001 From: Ariel Rokem Date: Sun, 17 Sep 2023 09:48:07 -0700 Subject: [PATCH 11/12] Force a space at the bottom of the tract profiles figure. --- examples/howto_examples/plot_stages_of_tractometry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/howto_examples/plot_stages_of_tractometry.py b/examples/howto_examples/plot_stages_of_tractometry.py index 728edc7db..928c1541b 100644 --- a/examples/howto_examples/plot_stages_of_tractometry.py +++ b/examples/howto_examples/plot_stages_of_tractometry.py @@ -555,5 +555,5 @@ def lines_as_tubes(sl, line_width, **kwargs): ax.set_xticks(np.arange(0, 20 * len(bundles), 20)) ax.set_xticklabels(bundles, rotation=45, ha='right') fig.set_size_inches(10, 5) +plt.subplots_adjust(bottom=0.2) fig.savefig('tract_profiles_as_table.png') -plt.tight_layout() \ No newline at end of file From b16b787a1cb06d84f898e3c7a154aa5809d0c442 Mon Sep 17 00:00:00 2001 From: Ariel Rokem Date: Thu, 9 Nov 2023 13:43:54 -0800 Subject: [PATCH 12/12] Include the usage portion of the docs. --- docs/source/howto/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/howto/index.rst b/docs/source/howto/index.rst index db64df058..5f9357242 100644 --- a/docs/source/howto/index.rst +++ b/docs/source/howto/index.rst @@ -8,7 +8,7 @@ software. :maxdepth: 2 installation_guide - usage + usage/index contributing developing/index getting_help
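
#############################################################################
# A closing aside on the ``make_video`` helper introduced in this series:
# Pillow's GIF writer uses the ``loop`` argument to control repetition, and
# ``loop=0`` means "loop forever". A sketch of a variant that produces an
# endlessly looping GIF (the ``make_looping_video`` name is hypothetical, not
# part of any patch above):

from PIL import Image


def make_looping_video(frames, out, duration=300):
    # Read every frame from disk, then write them out as a single animated
    # GIF that repeats indefinitely (loop=0).
    video = [Image.open(fname) for fname in frames]
    video[0].save(
        out,
        save_all=True,
        append_images=video[1:],
        duration=duration,
        loop=0)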