
adds lesion and track estimation, additional atlas functionality and generic tools #14

Merged
126 changes: 126 additions & 0 deletions neuro/additional_atlas_tools/atlas_structures.py
@@ -0,0 +1,126 @@
import pandas as pd
import numpy as np
from brainio import brainio
from skimage.filters import gaussian

from lesion_estimation.brain_tools import brain_render_tools

PATH_TO_STRUCTURES = "/home/slenzi/Desktop/pax_allen_structures.csv"
PATH_TO_ATLAS_IMAGE = "/home/slenzi/winstor/margrie/slenzi/serial2p/SL_997768/amap_analysis_310120/annotations.nii"


def load_atlas_structures_csv(path=PATH_TO_STRUCTURES):
df = pd.read_csv(path)
return df


def get_atlas_ids(df, parent_key):
ids = [int(id) for id in df[df["parent_id"] == parent_key]["id"]]
return ids


def create_hierarchy_paths(df):
all_paths = {}
all_ids = df["id"]
all_ids = all_ids[all_ids.notnull()]
for id in all_ids:
path = get_parents(df, id)
path = get_path_string_standard_fmt(path)
all_paths.setdefault(id, path)
return all_paths


def get_parents(df, k, parent=None, all_parents=None, root_id=997):
if parent is None:
all_parents = []
parent = df[df["id"] == k]["parent_id"].values[0]
all_parents.append(parent)

if int(parent) != root_id:
parent = df[df["id"] == parent]["parent_id"].values[0]
all_parents.append(parent)
return get_parents(df, k, parent, all_parents)
return all_parents[::-1]


def get_path_string_standard_fmt(all_parent_ids):
all_parent_ids = [str(i) for i in all_parent_ids]
return "/".join(all_parent_ids)


def add_to_df(df, df_dict):
    df.insert(
        len(df.keys()), "structure_id_path", np.full(len(df), np.nan, dtype=object)
    )
    for k, v in df_dict.items():
        loc = df[df["id"] == k].index[0]
        # assign via .loc to avoid chained-indexing (SettingWithCopyWarning) issues
        df.loc[loc, "structure_id_path"] = v
    return df


def get_all_children(df, k):
all_ids = df["id"]
all_ids = all_ids[all_ids.notnull()]
all_children = []
for id in all_ids:
all_parents = get_parents(df, id)
if k in all_parents:
all_children.append(id)
if len(all_children) == 0:
all_children.append(k)
return all_children


def render_all_subregions(atlas_id, out_dir, atlas_path=PATH_TO_ATLAS_IMAGE):
df = load_atlas_structures_csv()
atlas_ids = get_all_children(df, atlas_id)
atlas = brainio.load_any(atlas_path)
for idx in atlas_ids:
region_mask = atlas == idx
smoothed_region = smooth_structure(
region_mask, threshold=0.4, sigma=10
)
brain_render_tools.volume_to_vector_array_to_obj_file(
smoothed_region, f"{out_dir}/{idx}.obj"
)


def get_region_mask(atlas_id, atlas_path=PATH_TO_ATLAS_IMAGE, smooth=False):
df = load_atlas_structures_csv()
atlas_ids = get_all_children(df, atlas_id)
atlas = brainio.load_any(atlas_path)
all_regions = np.zeros_like(atlas)
for idx in atlas_ids:
region_mask = atlas == idx
all_regions = np.logical_or(region_mask, all_regions)

if smooth:
return smooth_structure(all_regions)

return all_regions


def smooth_structure(image, threshold=0.4, sigma=10):
for i in range(image.shape[1]):
image[:, i, :] = gaussian(image[:, i, :], sigma) > threshold
return image


def get_n_pixels_in_region(atlas_ids, atlas_path=PATH_TO_ATLAS_IMAGE):

atlas = brainio.load_any(atlas_path)
all_regions = np.zeros_like(atlas)

for id in atlas_ids:
region_mask = atlas == id
all_regions = np.logical_or(region_mask, all_regions)
all_regions_smooth = smooth_structure(all_regions)
return np.count_nonzero(all_regions_smooth)


def get_region(atlas_ids, atlas_path=PATH_TO_ATLAS_IMAGE):
atlas = brainio.load_any(atlas_path)
all_regions = np.zeros_like(atlas)
for id in atlas_ids:
region_mask = atlas == id
all_regions = np.logical_or(region_mask, all_regions)
all_regions_smooth = smooth_structure(all_regions)
return all_regions_smooth
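
A minimal usage sketch for this module, assuming it is importable as neuro.additional_atlas_tools.atlas_structures as laid out in this PR and that the module-level CSV and annotation image paths exist on disk; the region id (512, cerebellum in the Allen ontology) and the output directory are illustrative:

# Hypothetical usage of atlas_structures.py; the id and paths are examples only.
from neuro.additional_atlas_tools import atlas_structures

# Attach "997/8/.../parent_id"-style hierarchy paths to the structures table.
df = atlas_structures.load_atlas_structures_csv()
df = atlas_structures.add_to_df(df, atlas_structures.create_hierarchy_paths(df))

# Export every substructure of region 512 as a smoothed .obj mesh
# (the CSV and annotation image are read from the module-level default paths).
atlas_structures.render_all_subregions(512, "/tmp/meshes")

# Build a single combined, smoothed binary mask for the same region.
mask = atlas_structures.get_region_mask(512, smooth=True)
print(f"{int(mask.sum())} voxels in region 512")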
84 changes: 84 additions & 0 deletions neuro/brain_render_tools.py
@@ -0,0 +1,84 @@
from brainio import brainio
from brainrender.Utils.image import reorient_image, marching_cubes_to_obj
from brainrender.scene import Scene
from skimage import measure
import numpy as np

from lesion_estimation.brain_tools.atlas_structures import (
PATH_TO_ATLAS_IMAGE,
smooth_structure,
)


def volume_to_vector_array_to_obj_file(image, output_path, voxel_size=10):

oriented_binary = reorient_image(
image, invert_axes=[2], orientation="coronal"
)

verts, faces, normals, values = measure.marching_cubes_lewiner(
oriented_binary, 0, step_size=1
)

    if voxel_size != 1:
        verts = verts * voxel_size

faces = faces + 1
marching_cubes_to_obj((verts, faces, normals, values), str(output_path))


def visualize_obj(obj_path, *args, color="lightcoral", **kwargs):
"""
Uses brainrender to visualize a .obj file registered to the Allen CCF
:param obj_path: str, path to a .obj file
:param color: str, color of object being rendered
"""
print("Visualizing : " + obj_path)
scene = Scene(add_root=True)
scene.add_from_file(obj_path, *args, c=color, **kwargs)

return scene


def create_scene(default_structures):
scene = Scene(add_root=True)
for structure in default_structures:
scene.add_brain_regions([structure], use_original_color=True)
return scene


def make_smoothed_atlas_region(
output_dir,
atlas_ids,
structure_name,
atlas_path=PATH_TO_ATLAS_IMAGE,
smoothing_threshold=0.4,
sigma=10,
voxel_size=10,
):

atlas = brainio.load_any(atlas_path)

all_regions = np.zeros_like(atlas)
for id in atlas_ids:
region_mask = atlas == id
all_regions = np.logical_or(region_mask, all_regions)

all_regions = smooth_structure(
all_regions, threshold=smoothing_threshold, sigma=sigma
)

output_path = f"{output_dir}{structure_name}.obj"
oriented_binary = reorient_image(
all_regions, invert_axes=[2], orientation="coronal"
)

verts, faces, normals, values = measure.marching_cubes_lewiner(
oriented_binary, 0, step_size=1
)

    if voxel_size != 1:
        verts = verts * voxel_size

faces = faces + 1
marching_cubes_to_obj((verts, faces, normals, values), output_path)
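
A minimal sketch of how these helpers might be chained, assuming the module is importable as neuro.brain_render_tools and that brainrender is installed and configured; the atlas ids, output directory and structure name are examples:

# Hypothetical usage of brain_render_tools.py; ids and paths are illustrative.
from neuro import brain_render_tools

# Write a smoothed mesh for a set of annotation-image ids to out/CB.obj
# (the annotation image comes from the default defined in atlas_structures.py).
brain_render_tools.make_smoothed_atlas_region(
    "out/", [512], "CB", smoothing_threshold=0.4, sigma=10, voxel_size=10
)

# Load the mesh into a brainrender scene registered to the Allen CCF and show it.
scene = brain_render_tools.visualize_obj("out/CB.obj", color="lightcoral")
scene.render()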
136 changes: 136 additions & 0 deletions neuro/generic_neuro_tools.py
@@ -0,0 +1,136 @@
from pathlib import Path

import numpy as np
from brainio import brainio
from cellfinder.summarise import count_summary as cells_regions
from cellfinder.tools import source_files
from cellfinder.tools.source_files import source_custom_config
from imlib.general.exceptions import TransformationError
from imlib.general.system import safe_execute_command, SafeExecuteCommandError
from imlib.source.niftyreg_binaries import get_binary


SOURCE_IMAGE_NAME = "downsampled.nii"
DEFAULT_CONTROL_POINT_FILE = "inverse_control_point_file.nii"
DEFAULT_OUTPUT_FILE_NAME = "roi_transformed.nii"
DEFAULT_TEMP_FILE_NAME = "ROI_TMP.nii"
PROGRAM_NAME = "reg_resample"


def save_brain(image, source_image_path, output_path):
registration_config = source_custom_config()
atlas_scale, transformation_matrix = get_transform_space_params(
registration_config, source_image_path
)
brainio.to_nii(
image.astype(np.int16),
str(output_path),
scale=atlas_scale,
affine_transform=transformation_matrix,
)

print("finished generating roi image")


def get_transform_space_params(registration_config, destination_image):
atlas = brainio.load_nii(str(destination_image), as_array=False)
atlas_scale = atlas.header.get_zooms()
atlas_pixel_sizes = cells_regions.get_atlas_pixel_sizes(
registration_config
)
transformation_matrix = np.eye(4)
for i, axis in enumerate(("x", "y", "z")):
transformation_matrix[i, i] = atlas_pixel_sizes[axis]
return atlas_scale, transformation_matrix


def get_transformation_matrix(self):
    # Note: written as an instance method (it reads self._atlas_config and sets
    # self.transformation_matrix), so it is not usable as a standalone function.
    atlas_pixel_sizes = cells_regions.get_atlas_pixel_sizes(self._atlas_config)
    transformation_matrix = np.eye(4)
    for i, axis in enumerate(("x", "y", "z")):
        transformation_matrix[i, i] = atlas_pixel_sizes[axis]
    self.transformation_matrix = transformation_matrix


def get_registration_cmd(
program_path,
floating_image_path,
output_file_name,
destination_image_filename,
control_point_file,
):
# TODO combine with amap.brain_registration
cmd = "{} -cpp {} -flo {} -ref {} -res {}".format(
program_path,
control_point_file,
floating_image_path,
destination_image_filename,
output_file_name,
)
return cmd


def transform_image_to_standard_space(
reg_dir="/home/slenzi/winstor/margrie/slenzi/serial2p/SL_1029105_1016719/SL_1029105/analysis_200116/registration/",
image_to_transform_fname="downsampled.nii",
output_fname="background_channel_reg_to_filtered_brain.nii",
):

reg_dir = Path(reg_dir)
image_to_transform = reg_dir / image_to_transform_fname
image_with_desired_coordinate_space = reg_dir / "brain_filtered.nii"
control_point_file = reg_dir / "inverse_control_point_file.nii"
output_path = reg_dir / output_fname

nifty_reg_binaries_folder = source_files.get_niftyreg_binaries()
program_path = get_binary(nifty_reg_binaries_folder, PROGRAM_NAME)

reg_cmd = get_registration_cmd(
program_path,
image_to_transform,
output_path,
image_with_desired_coordinate_space,
control_point_file,
)

log_file_path = output_path.parent / "roi_transform_log.txt"
error_file_path = output_path.parent / "roi_transform_error.txt"
safely_execute_amap_registration(error_file_path, log_file_path, reg_cmd)
print(f"Registered ROI image can be found at {output_path}")


def safely_execute_amap_registration(error_file_path, log_file_path, reg_cmd):
print("Running ROI registration")
try:
safe_execute_command(reg_cmd, log_file_path, error_file_path)
except SafeExecuteCommandError as err:
raise TransformationError("ROI registration failed; {}".format(err))


def transform_all_channels_to_standard_space(reg_dir):
    p = Path(reg_dir)
all_channels = list(p.glob("downsampled_channel*"))
if any("registered" in f.name for f in all_channels):
return "looks like already processed... skipping..."

for channel in all_channels:
transform_image_to_standard_space(
reg_dir,
image_to_transform_fname=channel.name,
output_fname=f"registered_{channel.name}",
)


def transform_background_channel_to_standard_space(reg_dir):
    p = Path(reg_dir)
all_channels = list(p.glob("downsampled.nii"))
if any("registered" in f.name for f in all_channels):
return "looks like already processed... skipping..."

for channel in all_channels:
transform_image_to_standard_space(
reg_dir,
image_to_transform_fname=channel.name,
output_fname=f"registered_{channel.name}",
)
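
A hedged usage sketch for these helpers; the registration directory is an example and is assumed to contain the downsampled.nii, brain_filtered.nii and inverse_control_point_file.nii files the registration functions expect, with cellfinder/amap and niftyreg configured as in the imports above:

# Hypothetical usage of generic_neuro_tools.py; the directory and mask are examples.
import numpy as np
from neuro import generic_neuro_tools

reg_dir = "/path/to/registration/"

# Register the background channel and any extra downsampled channels to standard space.
generic_neuro_tools.transform_background_channel_to_standard_space(reg_dir)
generic_neuro_tools.transform_all_channels_to_standard_space(reg_dir)

# Save a toy ROI mask with the scale/affine of the downsampled image
# (in practice the mask shape must match that image).
mask = np.zeros((100, 100, 100), dtype=np.int16)
mask[40:60, 40:60, 40:60] = 1
generic_neuro_tools.save_brain(
    mask, reg_dir + "downsampled.nii", reg_dir + "roi.nii"
)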