{ "cells": [ { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Axon Segmentation Analysis of Whole-Brain Fluorescence Images" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from brainlit.BrainLine.util import (\n", " json_to_points,\n", " download_subvolumes,\n", ")\n", "from brainlit.BrainLine.parse_ara import *\n", "import xml.etree.ElementTree as ET\n", "from brainlit.BrainLine.imports import *\n", "from brainlit.BrainLine.apply_ilastik import (\n", " ApplyIlastik,\n", " ApplyIlastik_LargeImage,\n", " plot_results,\n", " examine_threshold,\n", " downsample_mask,\n", ")\n", "from brainlit.BrainLine.analyze_results import (\n", " AxonDistribution,\n", " collect_regional_segmentation,\n", ")\n", "\n", "%gui qt5" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## 1. Before Using this notebook" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "### 1a. Install brainlit, and dependencies\n", "### 1b. Write images to s3 using CloudReg\n", " - e.g. python -m cloudreg.scripts.create_precomputed_volumes --s3_input_paths --s3_output_paths --voxel_size --num_procs --resample_iso \n", "### 1c. Make point annotations in neuroglancer to identify subvolumes for validation (and possible training)\n", "[Instructions](https://neurodata.io/help/neuroglancer-pt-annotations/). JSON snippet:\n", "\n", " ,\n", " {\n", " \"type\":\"pointAnnotation\",\n", " \"name\": \"val\",\n", " \"points\": []\n", " }\n", "### 1d. Update axon_data.json file\n", "\n", "This file stores information on how to access neuroglancer data.\n", "\n", "Data should be stored in the ``brain2paths`` dictionary, with entries like:\n", "\n", " \"\": {\n", " \"base_local\": \"\",\n", " \"base_s3\": \"\",\n", " \"val_info\": {\n", " \"url\": \"\",\n", " \"somas_layer\": \"\",\n", " \"nonsomas_layer\": \"\",\n", " },\n", " \"subtype\": \"\"\n", " #Optional:\n", " \"train_info\": {\n", " \"url\": \"\",\n", " \"somas_layer\": \"\",\n", " \"nonsomas_layer\": \"\",\n", " },\n", " \"transformed mask\": \"\"\n", " }" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "brainlit_path = Path(os.path.abspath(\"\"))\n", "brainlit_path = brainlit_path.parents[3]\n", "print(f\"Path to brainlit: {brainlit_path}\")\n", "data_file = (\n", " brainlit_path / \"docs\" / \"notebooks\" / \"pipelines\" / \"BrainLine\" / \"axon_data.json\"\n", ")\n", "\n", "brain = \"test\" # brain ID\n", "axon_data_dir = (\n", " str(\n", " brainlit_path\n", " / \"docs\"\n", " / \"notebooks\"\n", " / \"pipelines\"\n", " / \"BrainLine\"\n", " / \"validation-data\"\n", " / \"axon\"\n", " )\n", " + \"/\"\n", ") # path to directory where training/validation data should be stored\n", "\n", "# Modify base path of test sample according to your system\n", "with open(data_file, \"r\") as f:\n", " data = json.load(f)\n", "\n", "\n", "base = (\n", " \"precomputed://file://\"\n", " + str(\n", " brainlit_path\n", " / \"docs\"\n", " / \"notebooks\"\n", " / \"pipelines\"\n", " / \"BrainLine\"\n", " / \"example-data\"\n", " )\n", " + \"/\"\n", ")\n", "data[\"brain2paths\"][\"test\"][\"base_s3\"] = base\n", "data[\"brain2paths\"][\"test\"][\"base_local\"] = base\n", "\n", "brain2paths = data[\"brain2paths\"]\n", "\n", "with open(data_file, \"w\") as f:\n", " json.dump(data, f, indent=4)" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Validation: Steps 2, 4-5, 7 below can be done via a 
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Validation: Steps 2, 4-5, 7 below can be done via a script: `brainlit/BrainLine/scripts/axon_validate.py`"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2. Download benchmark data"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### \\*Inputs\\*\n",
    "\n",
    "For the test sample to work, you should serve it via neuroglancer's [cors_webserver](https://github.com/google/neuroglancer), e.g. with the command ``python cors_webserver.py -d \"/Users/thomasathey/Documents/mimlab/mouselight/brainlit_parent/brainlit/brainlit/BrainLine/data/example\" -p 9010``"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "antibody_layer = \"antibody\"\n",
    "background_layer = \"background\"\n",
    "endogenous_layer = \"endogenous\"\n",
    "\n",
    "dataset_to_save = \"val\"  # train or val\n",
    "\n",
    "layer_names = [antibody_layer, background_layer, endogenous_layer]"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Download data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "download_subvolumes(\n",
    "    axon_data_dir,\n",
    "    brain_id=brain,\n",
    "    data_file=data_file,\n",
    "    layer_names=layer_names,\n",
    "    dataset_to_save=dataset_to_save,\n",
    ")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. View downloaded data (optional)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### \\*Inputs\\*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "fname = (\n",
    "    Path(axon_data_dir) / f\"brain{brain}\" / \"val\" / \"2054_1951_1825.h5\"\n",
    ")  # path to file for viewing\n",
    "scale = [1.8, 1.8, 2]  # voxel size in microns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with h5py.File(fname, \"r\") as f:\n",
    "    image = f.get(\"image_3channel\")  # axis 0 is the channel: background, foreground, endogenous\n",
    "    image_bg = image[0, :, :, :]\n",
    "    image_fg = image[1, :, :, :]\n",
    "    image_endo = image[2, :, :, :]\n",
    "\n",
    "viewer = napari.Viewer(ndisplay=3)\n",
    "viewer.add_image(image_fg, scale=scale)\n",
    "viewer.add_image(image_bg, scale=scale)\n",
    "viewer.add_image(image_endo, scale=scale)\n",
    "viewer.scale_bar.visible = True\n",
    "viewer.scale_bar.unit = \"um\""
   ]
  },
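  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "If you are running headless and cannot open napari, the optional sketch below prints basic statistics for each channel instead. It assumes the same `image_3channel` layout as the cell above."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional, headless alternative to the napari view above: print the\n",
    "# shape and intensity range of each channel in the downloaded subvolume.\n",
    "import h5py\n",
    "\n",
    "with h5py.File(fname, \"r\") as f:\n",
    "    im = f[\"image_3channel\"][:]  # axis 0 is the channel, as above\n",
    "\n",
    "for name, channel in zip([\"background\", \"foreground\", \"endogenous\"], im):\n",
    "    print(f\"{name}: shape={channel.shape}, min={channel.min()}, max={channel.max()}\")"
   ]
  },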
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4. Apply Ilastik to validation data"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You will need to do two things:\n",
    "- Add annotations to the downloaded data (for me, partial labels on 3 of the z-slices using ilastik) and export the labels to the same directory\n",
    "- Apply the axon segmentation model to the downloaded data. Results should be located in the same directory as the subvolumes, with \"_Probabilities\" appended to the file names. You can do this programmatically (below), or with the ilastik GUI (which is sometimes faster).\n",
    "\n",
    "Note: make sure foreground/background labels are matched between the model and your annotations (for me, blue/1 = axon, yellow/0 = background)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ilastik_project = str(\n",
    "    brainlit_path / Path(\"experiments/BrainLine/data/models/axon/axon_segmentation.ilp\")\n",
    ")  # path to ilastik model to be used\n",
    "ilastik_path = (\n",
    "    \"/Applications/ilastik-1.4.0b21-OSX.app/Contents/ilastik-release/run_ilastik.sh\"\n",
    ")  # path to ilastik's run script, which depends on your installation\n",
    "brains = [brain]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "applyilastik = ApplyIlastik(\n",
    "    ilastik_path=ilastik_path,\n",
    "    project_path=ilastik_project,\n",
    "    brains_path=axon_data_dir,\n",
    "    brains=brains,\n",
    ")\n",
    "applyilastik.process_subvols(ncpu=1)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5. Check results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_results(\n",
    "    data_dir=axon_data_dir, brain_ids=[brain], positive_channel=1, object_type=\"axon\"\n",
    ")\n",
    "plt.show()"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### If the results above are not adequate, improve the model and try again\n",
    "\n",
    "In my case, I identify more subvolumes from the sample at hand using the same process as for the validation data, add them as training data to the model, and retrain.\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Examine best threshold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "examine_threshold(\n",
    "    data_dir=axon_data_dir,\n",
    "    brain_id=brain,\n",
    "    threshold=0.52,\n",
    "    object_type=\"axon\",\n",
    "    positive_channel=1,\n",
    ")"
   ]
  },
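  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`plot_results` and `examine_threshold` compute the validation statistics for you. The standalone sketch below uses made-up numpy arrays purely to illustrate how a precision/recall pair at a single threshold is derived from a probability map and a ground-truth mask."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustration only (made-up arrays): precision/recall at one threshold,\n",
    "# the kind of quantities summarized by plot_results/examine_threshold.\n",
    "import numpy as np\n",
    "\n",
    "rng = np.random.default_rng(0)\n",
    "probs = rng.random((10, 10, 10))  # stand-in for an ilastik probability map\n",
    "truth = rng.random((10, 10, 10)) > 0.8  # stand-in for ground-truth axon labels\n",
    "\n",
    "pred = probs > 0.52  # same threshold as above\n",
    "tp = np.sum(pred & truth)  # true positives\n",
    "precision = tp / max(np.sum(pred), 1)\n",
    "recall = tp / max(np.sum(truth), 1)\n",
    "print(f\"precision={precision:.2f}, recall={recall:.2f}\")"
   ]
  },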
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6. Apply Ilastik to whole image\n",
    "\n",
    "This can alternatively be done via a script: `brainlit/BrainLine/axon_segment_image`"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### \\*Inputs\\*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "threshold = 0.52  # threshold to use for ilastik\n",
    "data_dir = (\n",
    "    axon_data_dir + \"brain_temp/\"\n",
    ")  # directory to store temporary subvolumes for segmentation\n",
    "\n",
    "min_coords = [2000, 1900, 1800]\n",
    "max_coords = [2100, 2000, 1900]  # -1 if you want to process the whole dimension\n",
    "\n",
    "ncpu = 1  # number of cores to use for segmentation\n",
    "chunk_size = [100, 100, 100]  # size of processing chunks, in voxels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "layer_names = [antibody_layer, background_layer, endogenous_layer]\n",
    "alli = ApplyIlastik_LargeImage(\n",
    "    ilastik_path=ilastik_path,\n",
    "    ilastik_project=ilastik_project,\n",
    "    ncpu=ncpu,\n",
    "    data_file=data_file,\n",
    ")\n",
    "alli.apply_ilastik_parallel(\n",
    "    brain_id=brain,\n",
    "    layer_names=layer_names,\n",
    "    threshold=threshold,\n",
    "    data_dir=data_dir,\n",
    "    chunk_size=chunk_size,\n",
    "    min_coords=min_coords,\n",
    "    max_coords=max_coords,\n",
    ")\n",
    "alli.collect_axon_results(brain_id=brain, ng_layer_name=\"antibody\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "downsample_mask(\n",
    "    brain, data_file=data_file, ncpu=1, bounds=[[2000, 1600, 1600], [2400, 2000, 2000]]\n",
    ")"
   ]
  },
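  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`apply_ilastik_parallel` tiles the region between `min_coords` and `max_coords` into chunks of `chunk_size`. The helper below is hypothetical (BrainLine does this internally); it only shows how such chunk bounds can be enumerated, e.g. to estimate how many subvolumes will pass through `data_dir`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical illustration of how a bounding box is tiled into chunks\n",
    "# between min_coords and max_coords (BrainLine does this internally).\n",
    "import itertools\n",
    "\n",
    "\n",
    "def chunk_bounds(min_coords, max_coords, chunk_size):\n",
    "    \"\"\"Yield (start, stop) corner pairs covering the bounding box.\"\"\"\n",
    "    ranges = [\n",
    "        range(lo, hi, step)\n",
    "        for lo, hi, step in zip(min_coords, max_coords, chunk_size)\n",
    "    ]\n",
    "    for corner in itertools.product(*ranges):\n",
    "        start = list(corner)\n",
    "        # clip the chunk so it never extends past max_coords\n",
    "        stop = [min(c + s, hi) for c, s, hi in zip(corner, chunk_size, max_coords)]\n",
    "        yield start, stop\n",
    "\n",
    "\n",
    "bounds = list(chunk_bounds(min_coords, max_coords, chunk_size))\n",
    "print(f\"{len(bounds)} chunk(s), first: {bounds[0]}\")"
   ]
  },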
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 7. Prepare Transformed Layers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "atlas_vol = CloudVolume(\n",
    "    \"precomputed://https://open-neurodata.s3.amazonaws.com/ara_2016/sagittal_10um/annotation_10um_2017\"\n",
    ")\n",
    "for layer in [\n",
    "    antibody_layer,\n",
    "    background_layer,\n",
    "    \"axon_mask\",\n",
    "]:  # axon_mask is transformed as an image because nearest-neighbor interpolation doesn't work well after downsampling\n",
    "    layer_path = brain2paths[brain][\"base_s3\"] + layer + \"_transformed\"\n",
    "    info = CloudVolume.create_new_info(\n",
    "        num_channels=1,\n",
    "        layer_type=\"image\",\n",
    "        data_type=\"uint16\",  # Channel images might be 'uint8'\n",
    "        encoding=\"raw\",  # raw, jpeg, compressed_segmentation, fpzip, kempressed\n",
    "        resolution=atlas_vol.resolution,  # Voxel scaling, units are in nanometers\n",
    "        voxel_offset=atlas_vol.voxel_offset,\n",
    "        chunk_size=[32, 32, 32],  # units are voxels\n",
    "        volume_size=atlas_vol.volume_size,  # e.g. a cubic millimeter dataset\n",
    "    )\n",
    "    vol_mask = CloudVolume(layer_path, info=info)\n",
    "    vol_mask.commit_info()"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 8. Register volume and transform data to atlas space using CloudReg"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 8a. You need to find an initial affine alignment using `cloudreg.scripts.registration.get_affine_matrix`. For example:"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A link to the ARA parcellation is:\n",
    "\n",
    "`precomputed://https://open-neurodata.s3.amazonaws.com/ara_2016/sagittal_10um/annotation_10um_2017`\n",
    "\n",
    "And some python commands to help with affine alignment are:\n",
    "\n",
    "```\n",
    "from cloudreg.scripts.registration import get_affine_matrix\n",
    "get_affine_matrix([1,1,1], [15,0,0], \"PIR\", \"RAI\", 1.15, \"precomputed://https://open-neurodata.s3.amazonaws.com/ara_2016/sagittal_10um/annotation_10um_2017\")\n",
    "```"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 8b. Run registration using `cloudreg.scripts.registration`. For example:"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "```\n",
    "python -m cloudreg.scripts.registration --input_s3_path precomputed://s3://smartspim-precomputed-volumes/2022_11_01/8790/Ch_561 --output_s3_path precomputed://s3://smartspim-precomputed-volumes/2022_11_01/8790/atlas_to_target --atlas_s3_path https://open-neurodata.s3.amazonaws.com/ara_2016/sagittal_50um/average_50um --parcellation_s3_path https://open-neurodata.s3.amazonaws.com/ara_2016/sagittal_10um/annotation_10um_2017 --atlas_orientation PIR --orientation RAI --rotation 0 0 0 --translation 0 0 0 --fixed_scale 1.2 --log_s3_path precomputed://s3://smartspim-precomputed-volumes/2022_11_01/8790/atlas_to_target --missing_data_correction True --grid_correction False --bias_correction True --regularization 5000.0 --iterations 3000 --registration_resolution 100\n",
    "```"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 8c. Transform segmentation to atlas space using CloudReg"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "```\n",
    "python -m cloudreg.scripts.transform_data --target_layer_source precomputed://file:///cis/home/tathey/brainline-example/axon_mask --transformed_layer_source precomputed://file:///cis/home/tathey/brainline-example/axon_mask_transformed --affine_path \"/cis/home/tathey/brainline-example/downloop_1_A.mat\" --velocity_path \"/cis/home/tathey/brainline-example/downloop_1_v.mat\"\n",
    "```\n",
    "\n",
    "This will write a layer to s3 with the transformed axon mask. The path to this layer should be added to `axon_data.json` under the `transformed_mask` key."
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Analysis: Steps 9-11 below can alternatively be done via a script: `brainlit/BrainLine/scripts/axon_analyze.py`"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 9. Combine registration and segmentation results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "collect_regional_segmentation(\n",
    "    brain_id=brain,\n",
    "    data_file=data_file,\n",
    "    outdir=axon_data_dir,\n",
    "    max_coords=max_coords,\n",
    "    min_coords=min_coords,\n",
    "    ncpu=2,\n",
    ")"
   ]
  },
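  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before generating plots, you can optionally check which Allen region IDs actually appear in the parcellation volume. The sketch below reads a small slab of the public ARA layer used in Steps 7-8; the slab coordinates are arbitrary and only for illustration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check: list the Allen region IDs present in a small slab\n",
    "# of the ARA parcellation (the same volume used for registration above).\n",
    "import numpy as np\n",
    "from cloudvolume import CloudVolume\n",
    "\n",
    "para_vol = CloudVolume(\n",
    "    \"precomputed://https://open-neurodata.s3.amazonaws.com/ara_2016/sagittal_10um/annotation_10um_2017\",\n",
    "    mip=0,\n",
    ")\n",
    "slab = np.squeeze(para_vol[500:600, 500:600, 500:600])  # arbitrary slab, in voxels\n",
    "print(\"region IDs in slab:\", np.unique(slab)[:20])"
   ]
  },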
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 10. View axon segmentation in brain space"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### \\*Inputs\\*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "brain_ids = [\"test\"]\n",
    "\n",
    "colors = {\n",
    "    \"test_type\": \"red\",\n",
    "}  # colors for different genotypes\n",
    "fold_on = False\n",
    "\n",
    "ontology_file = (\n",
    "    brainlit_path / \"brainlit\" / \"BrainLine\" / \"data\" / \"ara_structure_ontology.json\"\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ad = AxonDistribution(\n",
    "    brain_ids=brain_ids,\n",
    "    data_file=data_file,\n",
    "    ontology_file=ontology_file,\n",
    "    regional_distribution_dir=axon_data_dir,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ad.napari_coronal_section(z=1080, subtype_colors=colors, fold_on=fold_on)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 11. Display bar charts"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### \\*Inputs\\*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "wholebrain_results_dir = \"\"  # path to whole-brain results (not used in this example)\n",
    "\n",
    "brains = [brain]  # list of sample IDs to be shown\n",
    "\n",
    "regions = [\n",
    "    512,  # cerebellum\n",
    "    688,  # cerebral cortex\n",
    "    698,  # olfactory areas\n",
    "    1089,  # hippocampal formation\n",
    "    # 583,  # claustrum\n",
    "    477,  # striatum\n",
    "    # 803,  # pallidum\n",
    "    351,  # bed nuclei of stria terminalis\n",
    "    # 703,  # cortical subplate\n",
    "    1097,  # hypothalamus\n",
    "    549,  # thalamus\n",
    "    186,  # lateral habenula\n",
    "    519,  # cerebellar nuclei\n",
    "    313,  # midbrain\n",
    "    1065,  # hindbrain\n",
    "]  # Allen atlas region IDs to be shown\n",
    "# see: https://connectivity.brain-map.org/projection/experiment/480074702?imageId=480075280&initImage=TWO_PHOTON&x=17028&y=11704&z=3\n",
    "\n",
    "composite_regions = {\n",
    "    \"Amygdalar Nuclei\": [131, 295, 319, 780]\n",
    "}  # Custom composite Allen regions: key is the region name, value is a list of Allen region IDs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ad.region_barchart(regions, composite_regions=composite_regions, normalize_region=512)  # normalize to cerebellum (512)"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "5dc00d68ff54f8375e99934614da4863299fb9e10af4294c095b7f517546ff26"
  },
  "kernelspec": {
   "display_name": "Python 3.8.10 64-bit ('docs_env': venv)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  },
  "metadata": {
   "interpreter": {
    "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}