Automatic and manual segmentation pipeline¶
[ ]:
import brainlit
from brainlit.utils.session import NeuroglancerSession
from brainlit.utils.Neuron_trace import NeuronTrace
from brainlit.algorithms.generate_fragments import adaptive_thresh
import napari
from napari.utils import nbscreenshot
%gui qt5
Find valid segments¶
In this cell, we set up a NeuroglancerSession object. Since segmentation ID numbers are not contiguous, we print out a list of valid IDs in some range id_range. Most segment IDs are in range(300); additionally, segments 999 and 1000 are available.
[ ]:
"""
# Optional: Print the IDs of segments in Neuroglancer
url = "s3://open-neurodata/brainlit/brain1"
ngl_skel = NeuroglancerSession(url+"_segments", mip=1,use_https=False)
working_ids = []
id_range = 14
for seg_id in range(id_range):
try:
segment = ngl_skel.cv.skeleton.get(seg_id)
working_ids.append(seg_id)
except:
pass
print(working_ids)
"""
Download SWC information¶
Download the information contained in a SWC file for labelled vertices of a given seg_id
at a valid mip
from AWS.
[ ]:
"""
seg_id = 13
mip = 2
s3_trace = NeuronTrace(url+"_segments", seg_id, mip)
df = s3_trace.get_df()
df['sample'].size # the number of vertex IDs [1, 2, ..., df['sample'].size]
"""
[ ]:
"""
print(df)
"""
Select vertices¶
Select a subset of the vertices in the downloaded SWC to view and segment.
[ ]:
"""
subneuron_df = df[0:5] # choose vertices to use for the subneuron
vertex_list = subneuron_df['sample'].array
print(vertex_list)
"""
Download the Volume¶
Download the volume containing the specified vertices.
[ ]:
"""
ngl = NeuroglancerSession(url, mip=mip)
buffer = 10
img, bounds, vox_in_img_list = ngl.pull_vertex_list(seg_id, vertex_list, buffer=buffer, expand=True)
"""
Plot¶
[ ]:
"""
# Reference: https://github.com/NeuroDataDesign/mouselit/blob/master/bijan/mouse_test/final%20notebook.ipynb
def napari_viewer(img, labels=None, shapes=None, label_name="Segmentation"):
viewer = napari.view_image(np.squeeze(np.array(img)))
if labels is not None:
viewer.add_labels(labels, name=label_name)
if shapes is not None:
viewer.add_shapes(data=shapes, shape_type='path', edge_color='blue', name='Skeleton')
return viewer
"""
Let’s take a look at the downloaded volume. Napari will open in a new window.
[ ]:
"""
viewer = napari.Viewer(ndisplay=3)
viewer.add_image(img)
nbscreenshot(viewer)
"""
[ ]:
"""
n = napari_viewer(img)
"""
[ ]:
"""
import inspect
a = repr(n)
print(a)
b = repr(n).find(('napari.viewer.Viewer'))
print(b)
"""
[ ]:
"""
n.window.close()
"""
[ ]:
# We get a `corrected_subneuron_df` that contains `(x,y,z)` coordinates within the downloaded volume for the vertices in the SWC.
[ ]:
"""
transpose = vox_in_img_list.T
vox_in_img_list_t = transpose.tolist()
corrected_subneuron_df = s3_trace.generate_df_subset(list(vox_in_img_list_t), subneuron_start=0, subneuron_end=5)
print(corrected_subneuron_df)
"""
Convert the SWC to a graph and print some information about the graph.
[ ]:
"""
G = s3_trace._df_to_graph(df_voxel=corrected_subneuron_df)
print('Number of nodes:', len(G.nodes))
print('Number of edges:', len(G.edges))
print('Sample 1 coordinates (x,y,z):', G.nodes[1])
paths = s3_trace._graph_to_paths(G)
print('Number of paths:', len(paths))
"""
We can display the SWC on the volume.
[ ]:
"""
%gui qt
viewer = napari_viewer(img, shapes=paths)
nbscreenshot(viewer)
"""
Automatically segment the neuron¶
We start by converting the seed points to a format used by the thresholding.
[ ]:
"""
seed = [adaptive_thresh.get_seed(sample)[1] for sample in vox_in_img_list]
print(seed)
"""
Next, we compute a confidence-connected threshold segmentation.
[ ]:
"""
labels = adaptive_thresh.confidence_connected_threshold(img, seed, num_iter=1, multiplier=0.5)
"""
We can display the volume, SWC, and segmentation in Napari.
[ ]:
"""
%gui qt
viewer = napari_viewer(img, labels=labels, shapes=paths, label_name="Confidence-Connected Threshold")
nbscreenshot(viewer)
"""
Steps to Manually Edit Labels¶
Labels can be manually edited by following these steps:
1. Ensure Napari is in 2D-slice view, not 3D view (the second button from the bottom left).
2. Click the image layer and adjust the contrast limits as desired.
3. Click the “Confidence-Connected Threshold” layer.
4. Click the paintbrush tool and adjust the brush size. Ensure that “label” is set to 1 to paint and 0 to erase.
5. Click and drag on the image to adjust the labels. Changes are saved automatically, and CMD-Z to undo is supported.
Extract the manual labels for uploading.
[ ]:
# manual_labels = viewer.layers['Confidence-Connected Threshold'].data
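Before uploading, a quick sanity check can confirm the mask is still binary; a minimal sketch, assuming manual_labels is the 0/1 NumPy array extracted above.
[ ]:
# print(np.unique(manual_labels, return_counts=True))  # expect labels {0, 1} and their voxel counts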
Upload the segmentation to AWS.
[ ]:
# %%capture
# ngl_upload = NeuroglancerSession(url+"_seg", mip=mip)
# ngl_upload.push(manual_labels, bounds);
Confirm that the upload was successful. It was!
[ ]:
# downloaded_labels = ngl_upload.pull_bounds_seg(bounds)
[ ]:
# print(np.all(manual_labels == downloaded_labels))