Automatic and manual segmentation pipeline
[1]:
import brainlit
import numpy as np
from brainlit.utils.session import NeuroglancerSession
from brainlit.utils.Neuron_trace import NeuronTrace
from brainlit.algorithms.generate_fragments import adaptive_thresh
import napari
from napari.utils import nbscreenshot
%gui qt5
Find valid segments
In this cell, we set up a `NeuroglancerSession` object. Since segment ID numbers are not contiguous, we probe IDs in some range `id_range` and print the list of valid ones. Most segment IDs lie in `range(300)`; additionally, segments `999` and `1000` are available.
[2]:
"""
# Optional: Print the IDs of segments in Neuroglancer
url = "s3://open-neurodata/brainlit/brain1"
ngl_skel = NeuroglancerSession(url+"_segments", mip=1,use_https=False)
working_ids = []
id_range = 14
for seg_id in range(id_range):
try:
segment = ngl_skel.cv.skeleton.get(seg_id)
working_ids.append(seg_id)
except:
pass
print(working_ids)
"""
Download SWC information
Download the information contained in an SWC file for the labelled vertices of a given `seg_id` at a valid `mip` from AWS.
[3]:
"""
seg_id = 13
mip = 2
s3_trace = NeuronTrace(url+"_segments", seg_id, mip)
df = s3_trace.get_df()
df['sample'].size # the number of vertex IDs [1, 2, ..., df['sample'].size]
"""
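For reference, SWC is a plain-text format with one vertex per row and seven whitespace-separated fields; that is where columns like `sample` come from. The sketch below parses a single row of the generic format and is illustrative only, not brainlit's internal reader.

# Minimal sketch of the standard SWC row format (generic; not brainlit's
# internal reader). parent == -1 marks the root vertex.
def parse_swc_line(line):
    sample, structure, x, y, z, r, parent = line.split()
    return {
        "sample": int(sample),        # vertex ID
        "structure": int(structure),  # type code (e.g., 1 = soma, 2 = axon)
        "x": float(x), "y": float(y), "z": float(z),
        "r": float(r),                # radius at this vertex
        "parent": int(parent),        # parent vertex ID (-1 for the root)
    }

print(parse_swc_line("1 2 100.0 200.0 50.0 1.0 -1"))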
[4]:
"""
print(df)
"""
Select vertices
Select a subset of the vertices in the downloaded SWC to view and segment.
[5]:
"""
subneuron_df = df[0:5] # choose vertices to use for the subneuron
vertex_list = subneuron_df['sample'].array
print(vertex_list)
"""
Download the Volume
Download the volume containing the specified vertices.
[6]:
"""
ngl = NeuroglancerSession(url, mip=mip)
buffer = 10
img, bounds, vox_in_img_list = ngl.pull_vertex_list(seg_id, vertex_list, buffer = buffer, expand = True)
"""
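As a quick sanity check (assuming the cell above has run), we can inspect what `pull_vertex_list` returned: the image volume, its bounding box in the dataset, and the vertex coordinates expressed in the volume's local frame.

# Illustrative inspection of the values returned by pull_vertex_list
print("volume shape:", img.shape)
print("bounds in dataset:", bounds)
print("vertex voxel coordinates within the volume:", vox_in_img_list)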
Plot
[7]:
"""
# Reference: https://github.com/NeuroDataDesign/mouselit/blob/master/bijan/mouse_test/final%20notebook.ipynb
def napari_viewer(img, labels=None, shapes=None, label_name="Segmentation"):
viewer = napari.view_image(np.squeeze(np.array(img)))
if labels is not None:
viewer.add_labels(labels, name=label_name)
if shapes is not None:
viewer.add_shapes(data=shapes, shape_type='path', edge_color='blue', name='Skeleton')
return viewer
"""
Let’s take a look at the downloaded volume. Napari will open in a new window.
[8]:
"""
viewer = napari.Viewer(ndisplay=3)
viewer.add_image(img)
nbscreenshot(viewer)
"""
[9]:
"""
n=napari_viewer(img)
"""
[10]:
# Check the repr of the returned object to confirm it is a napari Viewer
a = repr(n)
print(a)
b = a.find('napari.viewer.Viewer')
print(b)
[11]:
n.window.close()
We get a `corrected_subneuron_df` that contains `(x,y,z)` coordinates within the downloaded volume for the vertices in the SWC.
[19]:
"""
transpose = vox_in_img_list.T
vox_in_img_list_t = transpose.tolist()
corrected_subneuron_df = s3_trace.generate_df_subset(list(vox_in_img_list_t), subneuron_start = 0, subneuron_end = 5)
print(corrected_subneuron_df)
"""
Convert the SWC to a graph and print some information about the graph.
[20]:
"""
G = s3_trace._df_to_graph(df_voxel=corrected_subneuron_df)
print('Number of nodes:', len(G.nodes))
print('Number of edges:', len(G.edges))
print('Sample 1 coordinates (x,y,z):', G.nodes[1])
paths = s3_trace._graph_to_paths(G)
print('Number of paths:', len(paths))
"""
We can display the SWC on the volume.
[21]:
"""
%gui qt
napari_viewer(img, shapes=paths)
nbscreenshot(viewer)
"""
Automatically segment the neuron
We start by converting the seed points to the coordinate format expected by the thresholding function.
[22]:
"""
seed = [adaptive_thresh.get_seed(sample)[1] for sample in vox_in_img_list]
print(seed)
"""
Next, we compute a confidence-connected threshold segmentation. The method grows a region outward from the seed points, at each iteration keeping neighboring voxels whose intensity lies within mean ± multiplier × standard deviation of the region grown so far.
[23]:
"""
labels = adaptive_thresh.confidence_connected_threshold(img, seed, num_iter=1, multiplier=0.5)
"""
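For context, confidence-connected thresholding is also available directly in SimpleITK. The sketch below illustrates the underlying filter under stated assumptions (that `img` and `seed` come from the cells above, with `seed` in SimpleITK's index order); it is not brainlit's implementation.

# Hedged sketch using SimpleITK's ConfidenceConnected filter directly;
# the tutorial itself uses brainlit's confidence_connected_threshold.
import SimpleITK as sitk

sitk_img = sitk.GetImageFromArray(np.squeeze(np.array(img)).astype(float))
seg = sitk.ConfidenceConnected(
    sitk_img,
    seedList=[tuple(int(c) for c in s) for s in seed],
    numberOfIterations=1,
    multiplier=0.5,
)
labels_alt = sitk.GetArrayFromImage(seg)  # binary mask as a numpy array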
We can display the volume, SWC, and segmentation in Napari.
[24]:
"""
%gui qt
viewer = napari_viewer(img, labels=labels, shapes=paths, label_name="Confidence-Connected Threshold")
nbscreenshot(viewer)
"""
Steps to Manually Edit Labels
Labels can be manually edited following these steps (for a programmatic alternative, see the sketch after this list):

1. Ensure napari is in 2D-slice view, not 3D view (the second button from the bottom left).
2. Click the image layer and adjust the contrast limits as desired.
3. Click the “Confidence-Connected Threshold” layer.
4. Click the paintbrush tool and adjust the brush size. Ensure that “label” is set to 1 to paint and 0 to erase.
5. Click and drag on the image to adjust labels. Changes are saved automatically, and Cmd-Z to undo is supported.
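Alternatively, the same edits can be made programmatically on the labels array. The sketch below is hypothetical: the voxel ranges are arbitrary examples, and it assumes `labels` from the thresholding step above.

# Hypothetical programmatic edit of the segmentation; the index ranges
# are placeholders, not meaningful coordinates
manual = labels.copy()
manual[5:10, 20:40, 20:40] = 1  # paint a block of voxels with label 1
manual[0:2, 0:10, 0:10] = 0     # erase a block of voxels (set to background)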
Extract the manual labels for uploading.
[25]:
manual_labels = viewer.layers['Confidence-Connected Threshold'].data
Upload the segmentation to AWS.
[26]:
%%capture
ngl_upload = NeuroglancerSession(url + "_seg", mip=mip)
ngl_upload.push(manual_labels, bounds)
Confirm that the upload was successful. It was!
[27]:
downloaded_labels = ngl_upload.pull_bounds_seg(bounds)
[28]:
print(np.all(manual_labels == downloaded_labels))