compute module, part of cuisto.

Contains actual computation functions.
get_distribution(df, col, hue, hue_filter, per_commonnorm, binlim, nbins=100)

Computes distribution of objects.

A global distribution using only `col` is computed, then a distribution distinguishing values in the `hue` column is computed. For the latter, it is possible to use only a subset of the data, based on another column, with `hue_filter`. This other column is determined by `hue`: if the latter is "hemisphere", then `hue_filter` is used in the "channel" column, and vice versa. `per_commonnorm` controls how the distributions are normalized: either as a whole (True) or independently (False).

Use cases:
(1) single channel, two hemispheres: col="x", hue="hemisphere", hue_filter="", per_commonnorm=True. Computes a distribution for each hemisphere; the sum of the areas under both curves is 1.
(2) three channels, one hemisphere: col="x", hue="channel", hue_filter="Ipsi.", per_commonnorm=False. Computes a distribution for each channel, using only points in the ipsilateral hemisphere. Each curve has an area of 1.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | Input data. | required |
| `col` | str | Key in `df`, the distributions are computed along this column. | required |
| `hue` | str | Key in `df`, "hemisphere" or "channel", used to distinguish distributions. | required |
| `hue_filter` | str | Further filtering for the "per" distribution. If hue = "channel", the value is the name of one of the hemispheres; if hue = "hemisphere", the value can be the name of a channel, a list of such names, or "all". | required |
| `per_commonnorm` | bool | Use common normalization for all hues (per argument). | required |
| `binlim` | list or tuple | First bin left edge and last bin right edge. | required |
| `nbins` | int | Number of bins. Default is 100. | `100` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df_distribution` | DataFrame | DataFrame with the computed distributions (global and per-hue). |

Source: cuisto/compute.py
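A minimal usage sketch (not from the cuisto docs): the DataFrame and its values below are made up, and the module is assumed importable as `cuisto.compute`.

```python
import pandas as pd

from cuisto import compute

# Toy data: one position column, plus the hue columns used above.
df = pd.DataFrame(
    {
        "x": [1.2, 3.4, 2.2, 4.1],
        "hemisphere": ["Ipsi.", "Contra.", "Ipsi.", "Contra."],
        "channel": ["marker+", "marker+", "marker-", "marker-"],
    }
)

# Use case (1): one distribution per hemisphere, normalized together.
df_distribution = compute.get_distribution(
    df, "x", "hemisphere", "", True, (0, 5), nbins=50
)
```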
get_regions_metrics(df_annotations, object_type, channel_names, meas_base_name, metrics_names)

Get a new DataFrame with the cumulated length of axon segments in each brain region.

This is the quantification per brain region for fiber-like objects, e.g. axons. The returned DataFrame has columns "cum. length µm", "cum. length mm", "density µm^-1", "density mm^-1", "coverage index".

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df_annotations` | DataFrame | DataFrame with an entry for each brain region, with columns "Area µm^2", "Name", "hemisphere", and "{object_type: channel} Length µm". | required |
| `object_type` | str | Object type (primary classification). | required |
| `channel_names` | dict | Map between original channel names and something else. | required |
| `meas_base_name` | str | Base measurement name from which the metrics are derived. | required |
| `metrics_names` | dict | Map between metrics names and their display names. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df_regions` | DataFrame | DataFrame with brain region names, areas and metrics. |

Source: cuisto/compute.py
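A hedged sketch of calling it on a hand-made annotations table; the column contents, the channel mapping and the metrics mapping below are illustrative only, not cuisto defaults.

```python
import pandas as pd

from cuisto import compute

# Toy annotations table following the column layout documented above.
df_annotations = pd.DataFrame(
    {
        "Name": ["GRN", "GRN"],
        "hemisphere": ["Ipsi.", "Contra."],
        "Area µm^2": [1.5e6, 1.4e6],
        "Fibers: EGFP Length µm": [1200.0, 800.0],
    }
)

df_regions = compute.get_regions_metrics(
    df_annotations,
    "Fibers",                            # object_type
    {"EGFP": "EGFP"},                    # channel_names (identity mapping here)
    "Length µm",                         # meas_base_name
    {"density µm^-2": "density µm^-2"},  # metrics_names (illustrative keys)
)
```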
normalize_starter_cells(df, cols, animal, info_file, channel_names)

Normalize data by the number of starter cells.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | Contains the data to be normalized. | required |
| `cols` | list-like | Columns to divide by the number of starter cells. | required |
| `animal` | str | Animal ID, used to parse the number of starter cells. | required |
| `info_file` | str | Full path to the TOML file with information. | required |
| `channel_names` | dict | Map between original channel names and something else. | required |

Returns:

| Type | Description |
| --- | --- |
| DataFrame | Same `df` with the requested columns normalized. |

Source: cuisto/compute.py
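Sketch, with a placeholder info file path and a made-up metric column:

```python
import pandas as pd

from cuisto import compute

df = pd.DataFrame({"count": [120, 80]})  # made-up metric column

df_norm = compute.normalize_starter_cells(
    df,
    ["count"],              # columns to normalize
    "animal0",              # animal ID (placeholder)
    "/path/to/infos.toml",  # information TOML file (placeholder)
    {"EGFP": "EGFP"},       # channel names mapping (illustrative)
)
```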
Analysis configuration file keys:

- `object_type`: name of the QuPath base classification (e.g. without the ": subclass" part)
- `segmentation_tag`: type of segmentation, matches the directory name, used only in the full pipeline

Information related to the atlas used:

- `name`: brainglobe-atlasapi atlas name
- `type`: "brain" or "cord" (e.g. registration done in ABBA or abba_python). This will determine whether to flip Left/Right when determining detections hemispheres based on their coordinates. It also adapts the axes in the 2D heatmaps.
- `midline`: midline Z coordinate (left/right limit) in microns, used to determine detections hemispheres based on their coordinates
- `outline_structures`: structures to show an outline of in heatmaps

Information related to imaging channels. Must contain all classifications derived from "object_type" you want to process, in the form subclassification name = name to display on the plots:

- `"marker+"`: classification name = name to display
- `"marker-"`: add any number of sub-classifications

Colors must have the same keys as "names", in the form subclassification name = color, with the color specified as a matplotlib named color, an RGB list or a hex code:

- `"marker+"`: classification name = matplotlib color
- `"marker-"`: must have the same entries as "names"

Information related to hemispheres, same structure as channels:

- `Left`: Left = name to display
- `Right`: Right = name to display

Colors must have the same keys as the names' keys:

- `Left`: "#ff516e"  # Left = matplotlib color (either #hex, color name or RGB list)
- `Right`: "#960010"  # Right = matplotlib color

Spatial distributions parameters:

- `stereo`: use stereotaxic coordinates (as in Paxinos, only for mouse brain CCFv3)
- `ap_lim`: bin limits for antero-posterior, in mm
- `ap_nbins`: number of bins for antero-posterior
- `dv_lim`: bin limits for dorso-ventral, in mm
- `dv_nbins`: number of bins for dorso-ventral
- `ml_lim`: bin limits for medio-lateral, in mm
- `ml_nbins`: number of bins for medio-lateral
- `hue`: color curves with this parameter, must be "hemisphere" or "channel"
- `hue_filter`: use only a subset of the data
- `common_norm`: use a global normalization (e.g. the sum of the areas under all curves is 1). Otherwise, normalize each hue individually.

Display parameters:

- `show_injection`: add a patch showing the extent of the injection sites, using the corresponding channel colors. Requires the information TOML configuration file to be set up.
- `cmap`: matplotlib colormap for the 2D heatmaps
- `cmap_nbins`: number of bins for the 2D heatmaps
- `cmap_lim`: color limits for the 2D heatmaps

Distributions per regions parameters:

- `base_measurement`: the name of the measurement in QuPath from which the others are derived. Usually "Count" or "Length µm".
- `hue`: color bars with this parameter, must be "hemisphere" or "channel"
- `hue_filter`: use only a subset of the data
- `hue_mirror`: plot two hue_filter in mirror instead of discarding the others. For example, if hue="channel" and hue_filter="both", plots the two hemispheres in mirror.
- `normalize_starter_cells`: normalize non-relative metrics by the number of starter cells

Names of metrics. The keys are used internally in cuisto as-is, so they should NOT be modified. The values only change the metrics' names in the output file:

- `"density µm^-2"`: relevant name
- `"density mm^-2"`: relevant name
- `"coverage index"`: relevant name
- `"relative measurement"`: relevant name
- `"relative density"`: relevant name

Regions display parameters:

- `nregions`: number of regions to display (sorted by max.)
- `orientation`: orientation of the bars ("h" or "v")
- `order`: order the regions by "ontology" or by "max". Set to "max" to provide a custom order.
- `dodge`: enforce the bars not being stacked
- `log_scale`: use a log scale for the metrics

Names of metrics to display:

- `"count"`: real_name = display_name, with real_name being the "values" in [regions.metrics]
- `"density mm^-2"`

Full paths to information TOML files and atlas outlines for the 2D heatmaps:

- `blacklist`
- `fusion`
- `outlines`
- `infos`
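A minimal sketch of peeking into such a file with the standard library; the file name and the `[atlas]` table name are assumptions based on the keys above, and this is not cuisto's own loader (see the Config class below for that).

```python
import tomllib  # Python >= 3.11

# "config.toml" and the "atlas" table name are assumptions for illustration.
with open("config.toml", "rb") as fid:
    cfg = tomllib.load(fid)

print(cfg["object_type"])    # QuPath base classification
print(cfg["atlas"]["name"])  # brainglobe-atlasapi atlas name
```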
config module, part of cuisto.

Contains the Config class.

Config(config_file)

The configuration class.

Reads the input configuration file and provides its constants.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `config_file` | str | Full path to the configuration file to load. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `cfg` | Config object | |

Constructor.

Source: cuisto/config.py
get_blacklist()

Source: cuisto/config.py
get_hue_palette(mode)

Get the color palette for a given hue.

Maps hue levels to colors, for channels or hemispheres.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `mode` | "hemisphere" or "channel" | | `"hemisphere"` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `palette` | dict | Maps a hue level to a color, usable in seaborn. |

Source: cuisto/config.py
get_injection_sites(animals)

Get the list of injection site coordinates for each animal and each channel.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `animals` | list of str | List of animals. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `injection_sites` | dict | {"x": {channel0: [x]}, "y": {channel1: [y]}} |

Source: cuisto/config.py
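Sketch, reusing the `cfg` object from the Config example above; the animal IDs are placeholders.

```python
from cuisto.config import Config

cfg = Config("/path/to/config.toml")  # placeholder path
injection_sites = cfg.get_injection_sites(["animal0", "animal1"])

# Per the documented layout, coordinates are nested per axis, then channel.
x_per_channel = injection_sites["x"]
```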
display module, part of cuisto.

Contains display functions, essentially wrapping matplotlib and seaborn functions.

add_data_coverage(df, ax, colors=None, **kwargs)

Add lines below the plot to represent data coverage.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | DataFrame with the coverage data (min and max coordinates, one column per animal, see utils.get_data_coverage()). | required |
| `ax` | Axes | Handle to the axes where to add the patch. | required |
| `colors` | list or str or None | Colors for the patches, as an RGB list or hex list. Should be the same size as the number of patches to plot, e.g. the number of columns in `df`. | `None` |
| `**kwargs` | | Passed to patches.Rectangle(). | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes | Handle to the updated axes. |

Source: cuisto/display.py
add_injection_patch(X, ax, **kwargs)

Add a patch representing the injection sites.

The patch will span from the minimal coordinate to the maximal one. If plotting in stereotaxic coordinates, the coordinates should be converted beforehand.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `X` | list | Coordinates in mm for each animal. Can be empty to not plot anything. | required |
| `ax` | Axes | Handle to the axes where to add the patch. | required |
| `**kwargs` | | Passed to Axes.axvspan. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes | Handle to the updated Axes. |

Source: cuisto/display.py
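A minimal sketch; the coordinates are made up and the keyword arguments are standard Axes.axvspan options.

```python
import matplotlib.pyplot as plt

from cuisto.display import add_injection_patch

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [0.1, 0.4, 0.2, 0.3])

# Injection site coordinates in mm for three animals (made-up values);
# the patch spans from their minimum to their maximum.
ax = add_injection_patch([1.2, 1.5, 1.35], ax, color="tab:orange", alpha=0.3)
plt.show()
```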
draw_structure_outline(view='sagittal', structures=['root'], outline_file='', ax=None, microns=False, **kwargs)

Plot brain region outlines in a given projection.

This requires a file containing the structures outlines.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `view` | str | Projection, "sagittal", "coronal" or "top". Default is "sagittal". | `'sagittal'` |
| `structures` | list[str] | List of structure acronyms whose outlines will be drawn. Default is ["root"]. | `['root']` |
| `outline_file` | str | Full path to the outlines HDF5 file. | `''` |
| `ax` | Axes or None | Axes where to plot the outlines. If None, get the current axes (the default). | `None` |
| `microns` | bool | If False (default), converts the coordinates to mm. | `False` |
| `**kwargs` | | Passed to pyplot.plot(). | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes | |

Source: cuisto/display.py
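Sketch; the outlines file path is a placeholder, and `linewidth` is simply forwarded to pyplot.plot().

```python
import matplotlib.pyplot as plt

from cuisto.display import draw_structure_outline

fig, ax = plt.subplots()
ax = draw_structure_outline(
    view="sagittal",
    structures=["root"],
    outline_file="/path/to/outlines.h5",  # placeholder
    ax=ax,
    linewidth=0.5,
)
```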
nice_bar_plot(df, x='', y=[''], hue='', ylabel=[''], orient='h', nx=None, ordering=None, names_list=None, hue_mirror=False, log_scale=False, bar_kws={}, pts_kws={})

Nice bar plot of per-region objects distribution.

This is used for objects distribution across brain regions. Shows the `y` metric (count, areal density, cumulated length...) in each `x` category (brain regions). `orient` controls whether the bars are shown horizontally (default) or vertically. The input `df` must have an additional "hemisphere" column. All `y` are plotted in the same figure as different subplots. `nx` controls the number of displayed regions.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | Input data. | required |
| `x` | str | Key in `df`, the categories (brain regions). | `''` |
| `y` | list of str | Keys in `df`, the metrics to plot. | `['']` |
| `hue` | str | Key in `df` used to group the bars. | `''` |
| `ylabel` | list of str | Y axis labels. | `['']` |
| `orient` | "h" or "v" | "h" for horizontal bars (default) or "v" for vertical bars. | `'h'` |
| `nx` | None or int | Number of `x` categories to display. If None (default), shows them all. | `None` |
| `ordering` | None or list[str] or "max" | Sorted list of acronyms. Data will be sorted following this order; if "max", sorted by descending values; if None, not sorted (default). | `None` |
| `names_list` | list or None | List of names to display. If None (default), takes the most prominent overall ones. | `None` |
| `hue_mirror` | bool | If there are 2 groups, plot them in mirror. Default is False. | `False` |
| `log_scale` | bool | Set the metrics on a log scale. Default is False. | `False` |
| `bar_kws` | dict | Passed to seaborn.barplot(). | `{}` |
| `pts_kws` | dict | Passed to seaborn.stripplot(). | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `figs` | list | List of figures. |

Source: cuisto/display.py
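An illustrative sketch with a made-up per-region DataFrame; the column names follow the documentation above, and the values are fabricated.

```python
import pandas as pd

from cuisto.display import nice_bar_plot

df = pd.DataFrame(
    {
        "Name": ["GRN", "GRN", "IRN", "IRN"],
        "hemisphere": ["Ipsi.", "Contra.", "Ipsi.", "Contra."],
        "count": [42, 17, 30, 12],
    }
)

# One figure with horizontal bars, grouped by hemisphere.
figs = nice_bar_plot(
    df,
    x="Name",
    y=["count"],
    hue="hemisphere",
    ylabel=["count"],
    orient="h",
)
```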
nice_distribution_plot(df, x='', y='', hue=None, xlabel='', ylabel='', injections_sites={}, channel_colors={}, channel_names={}, ax=None, **kwargs)

Nice plot of the 1D distribution of objects.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | Input data. | required |
| `x` | str | Key in `df` for the x axis. | `''` |
| `y` | str | Key in `df` for the y axis. | `''` |
| `hue` | str or None | Key in `df` used to distinguish curves. | `None` |
| `xlabel` | str | X axis label. | `''` |
| `ylabel` | str | Y axis label. | `''` |
| `injections_sites` | dict | Injection site 1D coordinates in a dict with the channel name as key. If empty, the injection site is not plotted (default). | `{}` |
| `channel_colors` | dict | Required if `injections_sites` is not empty; maps channel names to a color. | `{}` |
| `channel_names` | dict | Required if `injections_sites` is not empty; maps channel names to a display name. | `{}` |
| `ax` | Axes or None | Axes in which to plot the figure; if None, a new figure is created (default). | `None` |
| `**kwargs` | | Passed to seaborn.lineplot(). | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | matplotlib Axes | Handle to the axes. |

Source: cuisto/display.py
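Sketch with a made-up distribution DataFrame; the column names are illustrative.

```python
import pandas as pd

from cuisto.display import nice_distribution_plot

df = pd.DataFrame(
    {
        "bins": [0.5, 1.5, 2.5, 0.5, 1.5, 2.5],
        "density": [0.1, 0.5, 0.4, 0.2, 0.3, 0.5],
        "hemisphere": ["Ipsi."] * 3 + ["Contra."] * 3,
    }
)

ax = nice_distribution_plot(
    df,
    x="bins",
    y="density",
    hue="hemisphere",
    xlabel="Rostro-caudal position (mm)",
    ylabel="density",
)
```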
nice_heatmap(df, animals, x='', y='', xlabel='', ylabel='', invertx=False, inverty=False, **kwargs)

Nice plots of the 2D distribution of boutons, as a heatmap per animal.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | Input data. | required |
| `animals` | list-like of str | List of animals. | required |
| `x` | str | Key in `df` for the x axis. | `''` |
| `y` | str | Key in `df` for the y axis. | `''` |
| `xlabel` | str | X axis label. | `''` |
| `ylabel` | str | Y axis label. | `''` |
| `invertx` | bool | Whether to invert the x axis. Default is False. | `False` |
| `inverty` | bool | Whether to invert the y axis. Default is False. | `False` |
| `**kwargs` | | Passed to seaborn.histplot(). | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes or list of Axes | Handle to the axes. |

Source: cuisto/display.py
nice_joint_plot(df, x='', y='', xlabel='', ylabel='', invertx=False, inverty=False, outline_kws={}, ax=None, **kwargs)

Joint distribution.

Used to display a 2D heatmap of objects. This is more qualitative than quantitative, for display purposes.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | Input data. | required |
| `x` | str | Key in `df` for the x axis. | `''` |
| `y` | str | Key in `df` for the y axis. | `''` |
| `xlabel` | str | X axis label. | `''` |
| `ylabel` | str | Y axis label. | `''` |
| `invertx` | bool | Whether to invert the x axis. Default is False. | `False` |
| `inverty` | bool | Whether to invert the y axis. Default is False. | `False` |
| `outline_kws` | dict | Passed to draw_structure_outline(). | `{}` |
| `ax` | Axes or None | Axes to plot in. If None, draws in the current axes (default). | `None` |
| `**kwargs` | | Passed to seaborn.histplot. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes | |

Source: cuisto/display.py
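Sketch: plot a 2D heatmap of object coordinates with a structure outline. The coordinates are made up (the `Atlas_X`/`Atlas_Y` column names follow the utils module documentation below), the outlines file path is a placeholder, and `bins` is forwarded to seaborn.histplot.

```python
import pandas as pd

from cuisto.display import nice_joint_plot

df = pd.DataFrame(
    {"Atlas_X": [5.2, 6.1, 5.8], "Atlas_Y": [3.1, 2.9, 3.4]}  # made-up mm coordinates
)

ax = nice_joint_plot(
    df,
    x="Atlas_X",
    y="Atlas_Y",
    xlabel="Rostro-caudal (mm)",
    ylabel="Dorso-ventral (mm)",
    inverty=True,
    outline_kws={"outline_file": "/path/to/outlines.h5"},  # placeholder
    bins=50,
)
```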
plot_1D_distributions(dfs_distributions, cfg, df_coordinates=None)

Wraps nice_distribution_plot().

Source: cuisto/display.py

plot_2D_distributions(df, cfg)

Wraps nice_joint_plot().

Source: cuisto/display.py

plot_regions(df, cfg, **kwargs)

Wraps nice_bar_plot().

Source: cuisto/display.py
io module, part of cuisto.

Contains loading and saving functions.

cat_csv_dir(directory, **kwargs)

Scans a directory for CSV files and concatenates them into a single DataFrame.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `directory` | str | Path to the directory to scan. | required |
| `**kwargs` | | Passed to pandas.read_csv(). | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df` | DataFrame | All CSV files concatenated into a single DataFrame. |

Source: cuisto/io.py
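Sketch; the directory is a placeholder and the keyword argument is a plain pandas.read_csv() option.

```python
from cuisto import io

# Concatenate every CSV found in the folder into one table.
df = io.cat_csv_dir("/path/to/measurements", index_col=0)
```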
cat_data_dir(directory, segtype, **kwargs)

Wraps either cat_csv_dir() or cat_json_dir(), depending on `segtype`.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `directory` | str | Path to the directory to scan. | required |
| `segtype` | str | "synaptophysin" or "fibers". | required |
| `**kwargs` | | Passed to cat_csv_dir() or cat_json_dir(). | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df` | DataFrame | All files concatenated into a single DataFrame. |

Source: cuisto/io.py
cat_json_dir(directory, hemisphere_names, atlas)

Scans a directory for JSON files and concatenates them into a single DataFrame.

The JSON files must be generated with "workflow_import_export.groovy" from a QuPath project.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `directory` | str | Path to the directory to scan. | required |
| `hemisphere_names` | dict | Map between hemisphere names in the JSON files ("Right" and "Left") and something else (e.g. "Ipsi." and "Contra."). | required |
| `atlas` | BrainGlobeAtlas | Atlas to read the regions from. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df` | DataFrame | All JSON files concatenated into a single DataFrame. |

Source: cuisto/io.py
check_empty_file(filename, threshold=1)

Checks if a file is empty.

Empty is defined as a file whose number of lines is lower than or equal to `threshold` (to allow for headers).

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `filename` | str | Full path to the file to check. | required |
| `threshold` | int | If the number of lines is lower than or equal to this value, the file is considered empty. Default is 1. | `1` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `empty` | bool | True if the file is empty as defined above. |

Source: cuisto/io.py
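Sketch; the path is a placeholder.

```python
from cuisto import io

# Skip files that contain nothing beyond a header line.
if io.check_empty_file("/path/to/measurements.csv", threshold=1):
    print("nothing to process")
```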
get_measurements_directory(wdir, animal, kind, segtype)

Get the directory with the detections or annotations measurements for the given animal ID.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `wdir` | str | Base working directory. | required |
| `animal` | str | Animal ID. | required |
| `kind` | str | "annotation" or "detection". | required |
| `segtype` | str | Type of segmentation, e.g. "synaptophysin". | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `directory` | str | Path to the detections or annotations directory. |

Source: cuisto/io.py
load_dfs(filepath, fmt, identifiers=['df_regions', 'df_coordinates', 'df_distribution_ap', 'df_distribution_dv', 'df_distribution_ml'])

Load DataFrames from file.

If `fmt` is "h5" ("xlsx"), identifiers are interpreted as h5 group identifiers (sheet names, respectively). If `fmt` is "pickle", "csv" or "tsv", identifiers are appended to the file name. The path to the file can't have a dot (".") in it.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `filepath` | str | Full path to the file(s), without extension. | required |
| `fmt` | "h5", "csv", "pickle" or "xlsx" | File(s) format. | `"h5"` |
| `identifiers` | list of str | List of identifiers to load from the files. Defaults to the ones saved by cuisto.process.process_animals(). | `['df_regions', 'df_coordinates', 'df_distribution_ap', 'df_distribution_dv', 'df_distribution_ml']` |

Returns:

| Type | Description |
| --- | --- |
| | All requested DataFrames. |

Source: cuisto/io.py
save_dfs(out_dir, filename, dfs)

Save DataFrames to file.

The file format is inferred from the file name extension.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `out_dir` | str | Output directory. | required |
| `filename` | str | File name. | required |
| `dfs` | dict | DataFrames to save, as {identifier: df}. If HDF5 or xlsx, all DataFrames are saved in the same file; otherwise the identifier is appended to the file name. | required |

Source: cuisto/io.py
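A round-trip sketch using the documented identifiers; the paths are placeholders, and the HDF5 format assumes the usual pandas HDF5 dependency (pytables) is installed.

```python
import pandas as pd

from cuisto import io

dfs = {"df_regions": pd.DataFrame({"Name": ["GRN"], "count": [3]})}

# Save everything in a single HDF5 file, then load it back. Note the
# loading path has no extension (and no dot), per the load_dfs docs.
io.save_dfs("/path/to/results", "animal0.h5", dfs)
loaded = io.load_dfs("/path/to/results/animal0", "h5", identifiers=["df_regions"])
```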
process module, part of cuisto.

Wraps other functions for a click-and-play behaviour. Relies on the configuration file.

process_animal(animal, df_annotations, df_detections, cfg, compute_distributions=True)

Quantify objects for one animal.

Fetch the required files, compute the objects' distributions in brain regions and their spatial distributions, and gather the atlas coordinates.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `animal` | str | Animal ID. | required |
| `df_annotations` | DataFrame | DataFrame of QuPath annotations. | required |
| `df_detections` | DataFrame | DataFrame of QuPath detections. | required |
| `cfg` | Config | The configuration loaded from the TOML configuration file. | required |
| `compute_distributions` | bool | If False, do not compute the 1D distributions and return an empty list. Default is True. | `True` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df_regions` | DataFrame | Metrics in brain regions. One entry for each hemisphere of each brain region. |
| `df_distribution` | list of pandas.DataFrame | Rostro-caudal distribution, as raw count and probability density function, in each axis. |
| `df_coordinates` | DataFrame | Atlas coordinates of each point. |

Source: cuisto/process.py
process_animals(wdir, animals, cfg, out_fmt=None, compute_distributions=True)

Get data from all animals and plot them.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `wdir` | str | Base working directory, containing the animals' data. | required |
| `animals` | list-like of str | List of animal IDs. | required |
| `cfg` | Config | Configuration object. | required |
| `out_fmt` | None, "h5", "csv", "tsv", "xlsx" or "pickle" | Output file(s) format; if None, nothing is saved (default). | `None` |
| `compute_distributions` | bool | If False, do not compute the 1D distributions and return an empty list. Default is True. | `True` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df_regions` | DataFrame | Metrics in brain regions. One entry for each hemisphere of each brain region. |
| `df_distribution` | list of pandas.DataFrame | Rostro-caudal distribution, as raw count and probability density function, in each axis. |
| `df_coordinates` | DataFrame | Atlas coordinates of each point. |

Source: cuisto/process.py
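An end-to-end sketch of the click-and-play flow; the paths and animal IDs are placeholders, and the three returned values follow the table above.

```python
from cuisto import process
from cuisto.config import Config

cfg = Config("/path/to/config.toml")  # placeholder path

df_regions, dfs_distributions, df_coordinates = process.process_animals(
    "/path/to/working_dir",   # placeholder working directory
    ["animal0", "animal1"],   # placeholder animal IDs
    cfg,
    out_fmt="h5",             # also save the results as HDF5
)
```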
Template to show how to run a groovy script with QuPath, multi-threaded.

EXCLUDE_LIST = [] (module attribute)
Images names to NOT run the script on.

NTHREADS = 5 (module attribute)
Number of threads to use.

QPROJ_PATH = '/path/to/qupath/project.qproj' (module attribute)
Full path to the QuPath project.

QUIET = True (module attribute)
Use QuPath in quiet mode, i.e. with minimal verbosity.

QUPATH_EXE = '/path/to/the/qupath/QuPath-0.5.1 (console).exe' (module attribute)
Path to the QuPath executable (console mode).

SAVE = True (module attribute)
Whether to save the project after the script ran on an image.

SCRIPT_PATH = '/path/to/the/script.groovy' (module attribute)
Path to the groovy script.

Script to segment objects from images.

For fiber-like objects, binarize and skeletonize the image, then use `skan` to extract the branches coordinates. For polygon-like objects, binarize the image, detect objects and extract the contours coordinates. For points, treat them as polygons, then extract the centroids instead of the contours. Finally, export the coordinates as collections in GeoJSON files, importable in QuPath. Supports any number of channels of interest within the same image. One output file per channel will be created.

This script uses `cuisto.seg`. It is designed to work on probability maps generated from a pixel classifier in QuPath, but might work on raw images.

Usage: fill in the Parameters section of the script and run it. A "geojson" folder will be created in the parent directory of `IMAGES_DIR`. To exclude objects near the edges of an ROI, specify the path to masks stored as images with the same names as the probability images (without their suffix).

author: Guillaume Le Goc (g.legoc@posteo.org) @ NeuroPSI
version: 2024.12.10
CHANNELS_PARAMS = [{'name': 'cy5', 'target_channel': 0, 'proba_threshold': 0.85, 'qp_class': 'Fibers: Cy5', 'qp_color': [164, 250, 120]}, {'name': 'dsred', 'target_channel': 1, 'proba_threshold': 0.65, 'qp_class': 'Fibers: DsRed', 'qp_color': [224, 153, 18]}, {'name': 'egfp', 'target_channel': 2, 'proba_threshold': 0.85, 'qp_class': 'Fibers: EGFP', 'qp_color': [135, 11, 191]}] (module attribute)
This should be a list of dictionaries (one per channel) with the keys `name`, `target_channel`, `proba_threshold`, `qp_class` and `qp_color`.

EDGE_DIST = 0 (module attribute)
Distance to the brain edge to ignore, in µm. 0 to disable.

FILTERS = {'length_low': 1.5, 'area_low': 10, 'area_high': 1000, 'ecc_low': 0.0, 'ecc_high': 0.9, 'dist_thresh': 30} (module attribute)
Dictionary with the keys `length_low`, `area_low`, `area_high`, `ecc_low`, `ecc_high` and `dist_thresh`.

IMAGES_DIR = '/path/to/images' (module attribute)
Full path to the images to segment.

IMG_SUFFIX = '_Probabilities.tiff' (module attribute)
Images suffix, including the extension. Masks must have the same name without the suffix.

MASKS_DIR = 'path/to/corresponding/masks' (module attribute)
Full path to the masks, used to exclude objects near the brain edges (set to None or an empty string to disable this feature).

MASKS_EXT = 'tiff' (module attribute)
Masks files extension.

MAX_PIX_VALUE = 255 (module attribute)
Maximum possible pixel value, used to adjust `proba_threshold`.

ORIGINAL_PIXELSIZE = 0.45 (module attribute)
Original images pixel size in microns. This is in case the pixel classifier uses a lower resolution, yielding smaller probability maps, so the output objects coordinates need to be rescaled to the full-size images. The pixel size is written in the "Image" tab in QuPath.

QUPATH_TYPE = 'detection' (module attribute)
QuPath object type.

SEGTYPE = 'boutons' (module attribute)
Type of segmentation.
get_geojson_dir(images_dir)

Get the directory of GeoJSON files, which will be in the parent directory of `images_dir`.

If the directory does not exist, create it.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `images_dir` | str | | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `geojson_dir` | str | |

Source: scripts/segmentation/segment_images.py
get_geojson_properties(name, color, objtype='detection')

Return the GeoJSON object properties as a dictionary, ready to be used in geojson.Feature.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `name` | str | Classification name. | required |
| `color` | tuple or list | Classification color in RGB (3-element vector). | required |
| `objtype` | str | Object type ("detection" or "annotation"). Default is "detection". | `'detection'` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `props` | dict | |

Source: scripts/segmentation/segment_images.py
get_seg_method(segtype)

Determine what kind of segmentation is performed.

Segmentation kinds are, for now, lines, polygons or points. This is detected based on hardcoded keywords.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `segtype` | str | | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `seg_method` | str | |

Source: scripts/segmentation/segment_images.py
parameters_as_dict(images_dir, masks_dir, segtype, name, proba_threshold, edge_dist)

Get the parameters as a dictionary.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `images_dir` | str | Path to the images to be segmented. | required |
| `masks_dir` | str | Path to the images masks. | required |
| `segtype` | str | Segmentation type (e.g. "fibers"). | required |
| `name` | str | Name of the segmentation (e.g. "green"). | required |
| `proba_threshold` | float < 1 | Probability threshold. | required |
| `edge_dist` | float | Distance to the brain edge that is ignored, in µm. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `params` | dict | |

Source: scripts/segmentation/segment_images.py
process_directory(images_dir, img_suffix='', segtype='', original_pixelsize=1.0, target_channel=0, proba_threshold=0.0, qupath_class='Object', qupath_color=[0, 0, 0], channel_suffix='', edge_dist=0.0, filters={}, masks_dir='', masks_ext='')

Main function: processes the .ome.tiff files in the input directory.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `images_dir` | str | Path to the images to process. | required |
| `img_suffix` | str | Images suffix, including the extension. | `''` |
| `segtype` | str | Segmentation type. | `''` |
| `original_pixelsize` | float | Original images pixel size in microns. | `1.0` |
| `target_channel` | int | Index of the channel containing the objects of interest (e.g. not the background), in the probability map (not the original images channels). | `0` |
| `proba_threshold` | float < 1 | Probabilities below this value will be discarded (multiplied by `MAX_PIX_VALUE`). | `0.0` |
| `qupath_class` | str | Name of the QuPath classification. | `'Object'` |
| `qupath_color` | list of three elements | Color associated to that classification, in RGB. | `[0, 0, 0]` |
| `channel_suffix` | str | Channel name, used as a suffix in the output GeoJSON files. | `''` |
| `edge_dist` | float | Distance to the edge of the brain masks that will be ignored, in microns. Set to 0 to disable this feature. | `0.0` |
| `filters` | dict | Filter values used to include or exclude objects. See the top of the script. | `{}` |
| `masks_dir` | str | Path to the images masks, used to exclude objects found near the edges. A mask must have the same name as the corresponding image to be segmented, without its suffix. Default is "", which disables this feature. | `''` |
| `masks_ext` | str | Masks files extension, without the leading ".". Default is "". | `''` |

Source: scripts/segmentation/segment_images.py
write_parameters(outfile, parameters, filters, original_pixelsize)

Write the parameters to `outfile`.

A timestamp will be added. Parameters are written as key = value, and a [filters] section is added before the filters parameters.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `outfile` | str | Full path to the output file. | required |
| `parameters` | dict | General parameters. | required |
| `filters` | dict | Filters parameters. | required |
| `original_pixelsize` | float | Size of the pixels in the original image. | required |

Source: scripts/segmentation/segment_images.py
seg module, part of cuisto.

Functions for segmenting a probability map stored as an image.

convert_to_pixels(filters, pixelsize)

Convert some values in `filters` to pixels.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `filters` | dict | Must contain the keys used below. | required |
| `pixelsize` | float | Pixel size in microns. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `filters` | dict | Same as the input, with values in pixels. |

Source: cuisto/seg.py
erode_mask(mask, edge_dist)

Erode the mask outline so that it is `edge_dist` smaller from the border.

This allows discarding the edges.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `mask` | ndarray | | required |
| `edge_dist` | float | Distance to the edges, in pixels. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `eroded_mask` | ndarray of bool | |

Source: cuisto/seg.py
get_collection_from_points(coords, properties, rescale_factor=1.0, offset=0.5)

Gather the coordinates from `coords` and put them in GeoJSON format.

An entry in `coords` is a pair of (x, y) coordinates defining a point. `properties` is a dictionary with the QuPath properties of each detection.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `coords` | list | | required |
| `properties` | dict | | required |
| `rescale_factor` | float | Rescale the output coordinates by this factor. | `1.0` |
| `offset` | float | Shift the coordinates by this amount, typically to get pixel centers or edges. Default is 0.5. | `0.5` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `collection` | FeatureCollection | |

Source: cuisto/seg.py
get_collection_from_poly(contours, properties, rescale_factor=1.0, offset=0.5)

Gather the coordinates in the list and put them in GeoJSON format as Polygons.

An entry in `contours` must define a closed polygon. `properties` is a dictionary with the QuPath properties of each detection.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `contours` | list | | required |
| `properties` | dict | QuPath objects' properties. | required |
| `rescale_factor` | float | Rescale the output coordinates by this factor. | `1.0` |
| `offset` | float | Shift the coordinates by this amount, typically to get pixel centers or edges. Default is 0.5. | `0.5` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `collection` | FeatureCollection | A FeatureCollection ready to be written as GeoJSON. |

Source: cuisto/seg.py
get_collection_from_skel(skeleton, properties, rescale_factor=1.0, offset=0.5)

Get the coordinates of each skeleton path as GeoJSON Features in a FeatureCollection.

`properties` is a dictionary with the QuPath properties of each detection.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `skeleton` | Skeleton | | required |
| `properties` | dict | QuPath objects' properties. | required |
| `rescale_factor` | float | Rescale the output coordinates by this factor. | `1.0` |
| `offset` | float | Shift the coordinates by this amount, typically to get pixel centers or edges. Default is 0.5. | `0.5` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `collection` | FeatureCollection | A FeatureCollection ready to be written as GeoJSON. |

Source: cuisto/seg.py
get_image_skeleton(img, minsize=0)

Get the image skeleton.

Computes the image skeleton and removes objects smaller than `minsize`.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `img` | ndarray of bool | | required |
| `minsize` | number | Minimum size an object can have, as a number of pixels. Default is 0. | `0` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `skel` | ndarray of bool | Binary image with a 1-pixel wide skeleton. |

Source: cuisto/seg.py
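A small sketch on a synthetic binary image; the module is assumed importable as `cuisto.seg`.

```python
import numpy as np

from cuisto import seg

img = np.zeros((64, 64), dtype=bool)
img[30:34, 10:50] = True  # a thick horizontal bar

# Skeletonize and drop objects smaller than 10 pixels.
skel = seg.get_image_skeleton(img, minsize=10)
print(skel.sum())  # number of skeleton pixels, roughly the bar length
```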
get_pixelsize(image_name)

Get the pixel size recorded in the TIFF metadata of `image_name`.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `image_name` | str | Full path to the image. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `pixelsize` | float | Pixel size in microns. |

Source: cuisto/seg.py
pad_image(img, finalsize)

Pad the image with zeroes to match the expected final size.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `img` | ndarray | | required |
| `finalsize` | tuple or list | (nrows, ncolumns) | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `imgpad` | ndarray | `img` with black borders. |

Source: cuisto/seg.py
segment_lines(img, geojson_props, minsize=0.0, rescale_factor=1.0)

Wraps skeleton analysis to get the paths coordinates.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `img` | ndarray of bool | Binary image to segment as lines. | required |
| `geojson_props` | dict | GeoJSON properties of the objects. | required |
| `minsize` | float | Minimum size in pixels for an object. | `0.0` |
| `rescale_factor` | float | Rescale the output coordinates by this factor. | `1.0` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `collection` | FeatureCollection | A FeatureCollection ready to be written as GeoJSON. |

Source: cuisto/seg.py
segment_points(img, geojson_props, area_min=0.0, area_max=np.inf, ecc_min=0, ecc_max=1, dist_thresh=0, rescale_factor=1)

Point segmentation.

First, segment polygons to apply shape filters, then extract their centroids, and remove isolated points as defined by `dist_thresh`.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `img` | ndarray of bool | Binary image to segment as points. | required |
| `geojson_props` | dict | GeoJSON properties of the objects. | required |
| `area_min` | float | Minimum area in pixels for an object. | `0.0` |
| `area_max` | float | Maximum area in pixels for an object. | `np.inf` |
| `ecc_min` | float | Minimum eccentricity for an object. | `0` |
| `ecc_max` | float | Maximum eccentricity for an object. | `1` |
| `dist_thresh` | float | Maximal distance in pixels between objects before considering them isolated and removing them. 0 disables it. | `0` |
| `rescale_factor` | float | Rescale the output coordinates by this factor. | `1` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `collection` | FeatureCollection | A FeatureCollection ready to be written as GeoJSON. |

Source: cuisto/seg.py
segment_polygons(img, geojson_props, area_min=0.0, area_max=np.inf, ecc_min=0.0, ecc_max=1.0, rescale_factor=1.0)

Polygon segmentation.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `img` | ndarray of bool | Binary image to segment as polygons. | required |
| `geojson_props` | dict | GeoJSON properties of the objects. | required |
| `area_min` | float | Minimum area in pixels for an object. | `0.0` |
| `area_max` | float | Maximum area in pixels for an object. | `np.inf` |
| `ecc_min` | float | Minimum eccentricity for an object. | `0.0` |
| `ecc_max` | float | Maximum eccentricity for an object. | `1.0` |
| `rescale_factor` | float | Rescale the output coordinates by this factor. | `1.0` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `collection` | FeatureCollection | A FeatureCollection ready to be written as GeoJSON. |

Source: cuisto/seg.py
utils module, part of cuisto.

Contains utility functions.

add_brain_region(df, atlas, col='Parent')

Add the brain region to a DataFrame with `Atlas_X`, `Atlas_Y` and `Atlas_Z` columns.

This uses the BrainGlobe Atlas API to query the atlas. It does not use the structure_from_coords() method; instead it manually converts the coordinates into stack indices, gets the corresponding annotation ids and queries the corresponding acronyms -- because brainglobe-atlasapi is not vectorized at all.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | DataFrame with the atlas coordinates in microns. | required |
| `atlas` | BrainGlobeAtlas | | required |
| `col` | str | Column in which to put the regions acronyms. Default is "Parent". | `'Parent'` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df` | DataFrame | Same DataFrame with a new "Parent" column. |

Source: cuisto/utils.py
add_channel(df, object_type, channel_names)

Add the channel as a measurement in a detections DataFrame.

The channel is read from the Classification column, which must be formatted as "object_type: channel".

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | DataFrame with the detections measurements. | required |
| `object_type` | str | Object type (primary classification). | required |
| `channel_names` | dict | Map between original channel names and something else. | required |

Returns:

| Type | Description |
| --- | --- |
| DataFrame | Same DataFrame with a new "channel" column. |

Source: cuisto/utils.py
add_hemisphere(df, hemisphere_names, midline=5700, col='Atlas_Z', atlas_type='brain')

Add the hemisphere (left/right) as a measurement for detections or annotations.

The hemisphere is read from the "Classification" column for annotations; the latter needs to be in the form "Right: Name" or "Left: Name". For detections, the input `col` of `df` is compared to `midline` to assess whether the object belongs to the left or right hemisphere.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | DataFrame with the detections or annotations measurements. | required |
| `hemisphere_names` | dict | Map between "Left" and "Right" and something else. | required |
| `midline` | float | Used only for detections: midline coordinate, in microns. | `5700` |
| `col` | str | Name of the column containing the Z coordinate (medio-lateral), in microns. Default is "Atlas_Z". | `'Atlas_Z'` |
| `atlas_type` | "brain" or "cord" | Type of atlas used for registration. Required because the brain atlas is swapped between left and right while the spinal cord atlas is not. Default is "brain". | `"brain"` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df` | DataFrame | The same DataFrame with a new "hemisphere" column. |

Source: cuisto/utils.py
ccf_to_stereo(x_ccf, y_ccf, z_ccf=0)

Convert X, Y, Z coordinates in CCFv3 to stereotaxic coordinates (as in the Paxinos-Franklin atlas).

Coordinates are shifted, rotated and squeezed; see (1) for more information. The input must be in mm. `x_ccf` corresponds to the antero-posterior (rostro-caudal) axis, `y_ccf` corresponds to the dorso-ventral axis, and `z_ccf` corresponds to the medio-lateral (left-right) axis.

Warning: this is a rough estimation.

(1) https://community.brain-map.org/t/how-to-transform-ccf-x-y-z-coordinates-into-stereotactic-coordinates/1858

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x_ccf` | float or ndarray | Coordinates in CCFv3 space, in mm. | required |
| `y_ccf` | float or ndarray | Coordinates in CCFv3 space, in mm. | required |
| `z_ccf` | float or ndarray | Coordinate in CCFv3 space, in mm. Default is 0. | `0` |

Returns:

| Type | Description |
| --- | --- |
| ap, dv, ml : floats or np.ndarray | Stereotaxic coordinates, in mm. |

Source: cuisto/utils.py
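Sketch: convert a single CCFv3 point (made-up values, in mm) to approximate stereotaxic coordinates.

```python
from cuisto import utils

ap, dv, ml = utils.ccf_to_stereo(5.2, 3.1, 5.7)
print(f"AP={ap:.2f} mm, DV={dv:.2f} mm, ML={ml:.2f} mm")
```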
filter_df_classifications(df, filter_list, mode='keep', col='Classification')

Filter a DataFrame based on whether the specified `col` column entries contain elements of `filter_list`. Case insensitive.

If `mode` is "keep", keep entries only if their `col` is in the list (default). If `mode` is "remove", remove entries if their `col` is in the list.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | | required |
| `filter_list` | list, tuple or str | List of words that must be present to trigger the filter. | required |
| `mode` | "keep" or "remove" | Keep or remove entries matching the list. Default is "keep". | `'keep'` |
| `col` | str | Key in `df`. Default is "Classification". | `'Classification'` |

Returns:

| Type | Description |
| --- | --- |
| DataFrame | Filtered DataFrame. |

Source: cuisto/utils.py
filter_df_regions(df, filter_list, mode='keep', col='Parent')
+
+#Filters entries in df
based on wether their col
is in filter_list
or not.
If mode
is "keep", keep entries only if their col
in is in the list (default).
+If mode
is "remove", remove entries if their col
is in the list.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ df
+ |
+
+ DataFrame
+ |
+
+
+
+
+ |
+ + required + | +
+ filter_list
+ |
+
+ list - like
+ |
+
+
+
+ List of regions to keep or remove from the DataFrame. + |
+ + required + | +
+ mode
+ |
+
+ keep or remove
+ |
+
+
+
+ Keep or remove entries from the list. Default is "keep". + |
+
+ 'keep'
+ |
+
+ col
+ |
+
+ str
+ |
+
+
+
+ Key in |
+
+ 'Parent'
+ |
+
Returns:
+Name | Type | +Description | +
---|---|---|
df |
+ DataFrame
+ |
+
+
+
+ Filtered DataFrame. + |
+
cuisto/utils.py
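For example, to drop entries belonging to given regions from the quantification (the acronyms are illustrative) :
import pandas as pd

import cuisto

df_annotations = pd.read_csv("cells_measurements_annotations.tsv", sep="\t")
# remove entries whose "Parent" region is in the list
df_filtered = cuisto.utils.filter_df_regions(
    df_annotations, ["fiber tracts", "VS"], mode="remove", col="Parent"
)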
get_blacklist(file, atlas)
+
+#Build a list of regions to exclude, read from a file.
+The file must be a TOML file with [WITH_CHILDS] and [EXACT] sections.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ file
+ |
+
+ str
+ |
+
+
+
+ Full path to the atlas_blacklist.toml file. + |
+ + required + | +
+ atlas
+ |
+
+ BrainGlobeAtlas
+ |
+
+
+
+ Atlas to extract regions from. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
black_list |
+ list
+ |
+
+
+
+ Full list of acronyms to discard. + |
+
cuisto/utils.py
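A short sketch, assuming the brainglobe-atlasapi package is available and an atlas_blacklist.toml file exists next to the script :
from brainglobe_atlasapi import BrainGlobeAtlas

import cuisto

atlas = BrainGlobeAtlas("allen_mouse_10um")
black_list = cuisto.utils.get_blacklist("atlas_blacklist.toml", atlas)
print(black_list[:5])  # first acronyms that will be discarded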
get_data_coverage(df, col='Atlas_AP', by='animal')
+
+#Get min and max in col
for each by
.
Used to get data coverage for each animal to plot in distributions.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ df
+ |
+
+ DataFrame
+ |
+
+
+
+ DataFrame with col and by columns. + |
+ + required + | +
+ col
+ |
+
+ str
+ |
+
+
+
+ Key in |
+
+ 'Atlas_AP'
+ |
+
+ by
+ |
+
+ str
+ |
+
+
+
+ Key in |
+
+ 'animal'
+ |
+
Returns:
+Type | +Description | +
---|---|
+ DataFrame
+ |
+
+
+
+ min and max of |
+
cuisto/utils.py
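Usage sketch on a toy coordinates table (values are illustrative) :
import pandas as pd

import cuisto

df_coordinates = pd.DataFrame(
    {"Atlas_AP": [-6.43, -6.45, -5.98, -6.01], "animal": ["mouse0", "mouse0", "mouse1", "mouse1"]}
)
# min and max of "Atlas_AP" for each animal
df_minmax = cuisto.utils.get_data_coverage(df_coordinates, col="Atlas_AP", by="animal")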
get_df_kind(df)
+
+#Get DataFrame kind, e.g. Annotations or Detections.
+It is based on reading the Object Type of the first entry, so the DataFrame must +have only one kind of object.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ df
+ |
+
+ DataFrame
+ |
+
+
+
+
+ |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
kind |
+ str
+ |
+
+
+
+ "detection" or "annotation". + |
+
cuisto/utils.py
get_injection_site(animal, info_file, channel, stereo=False)
+
+#Get the injection site coordinates associated with animal.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ animal
+ |
+
+ str
+ |
+
+
+
+ Animal ID. + |
+ + required + | +
+ info_file
+ |
+
+ str
+ |
+
+
+
+ Path to TOML info file. + |
+ + required + | +
+ channel
+ |
+
+ str
+ |
+
+
+
+ Channel ID as in the TOML file. + |
+ + required + | +
+ stereo
+ |
+
+ bool
+ |
+
+
+
+ Whether to convert coordinates to stereotaxic coordinates. Default is False. + |
+
+ False
+ |
+
Returns:
+Type | +Description | +
---|---|
+ x, y, z : floats
+ |
+
+
+
+ Injection site coordinates. + |
+
cuisto/utils.py
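Usage sketch (the animal ID, TOML file name and channel ID are illustrative and must match your info file) :
import cuisto

x, y, z = cuisto.utils.get_injection_site(
    "animalid0", "infos.toml", "marker1", stereo=True
)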
get_leaves_list(atlas)
+
+#Get the list of leaf brain regions.
+Leaf brain regions are defined as regions without children, i.e. regions that are at the bottom of the hierarchy.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ atlas
+ |
+
+ BrainGlobeAtlas
+ |
+
+
+
+ Atlas to extract regions from. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
leaves_list |
+ list
+ |
+
+
+
+ Acronyms of leaf brain regions. + |
+
cuisto/utils.py
get_mapping_fusion(fusion_file)
+
+#Get the mapping dictionary between input brain regions and new regions defined in the atlas_fusion.toml file.
The returned dictionary can be used in DataFrame.replace().
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ fusion_file
+ |
+
+ str
+ |
+
+
+
+ Path to the TOML file with the merging rules. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
m |
+ dict
+ |
+
+
+
+ Mapping as {old: new}. + |
+
cuisto/utils.py
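Typical use together with DataFrame.replace() (file names are illustrative) :
import pandas as pd

import cuisto

df_annotations = pd.read_csv("cells_measurements_annotations.tsv", sep="\t")
m = cuisto.utils.get_mapping_fusion("atlas_fusion.toml")
# rename input regions to the fused acronyms
df_annotations["Parent"] = df_annotations["Parent"].replace(m)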
get_starter_cells(animal, channel, info_file)
+
+#Get the number of starter cells associated with animal.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ animal
+ |
+
+ str
+ |
+
+
+
+ Animal ID. + |
+ + required + | +
+ channel
+ |
+
+ str
+ |
+
+
+
+ Channel ID. + |
+ + required + | +
+ info_file
+ |
+
+ str
+ |
+
+
+
+ Path to TOML info file. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
n_starters |
+ int
+ |
+
+
+
+ Number of starter cells. + |
+
cuisto/utils.py
merge_regions(df, col, fusion_file)
+
+#Merge brain regions following the rules in the fusion_file.toml file.
The merging is applied on col of the input DataFrame : entries whose value is found in one of the members sections of the file will be changed to the new acronym.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ df
+ |
+
+ DataFrame
+ |
+
+
+
+
+ |
+ + required + | +
+ col
+ |
+
+ str
+ |
+
+
+
+ Column of |
+ + required + | +
+ fusion_file
+ |
+
+ str
+ |
+
+
+
+ Path to the toml file with the merging rules. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
df |
+ DataFrame
+ |
+
+
+
+ Same DataFrame with regions renamed. + |
+
cuisto/utils.py
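The previous replace-based example can be done in a single call (file names are illustrative) :
import pandas as pd

import cuisto

df_annotations = pd.read_csv("cells_measurements_annotations.tsv", sep="\t")
df_annotations = cuisto.utils.merge_regions(df_annotations, "Parent", "atlas_fusion.toml")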
renormalize_per_key(df, by, on)
+
+#Renormalize on
column by its sum for each by
.
Use case : relative density is computed for both hemispheres, so if one wants to
+plot only one hemisphere, the sum of the bars corresponding to one channel (by
)
+should be 1. So :
df = df[df["hemisphere"] == "Ipsi."]
df = renormalize_per_key(df, "channel", "relative density")
Then, the sum of "relative density" for each "channel" equals 1.
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ df
+ |
+
+ DataFrame
+ |
+
+
+
+
+ |
+ + required + | +
+ by
+ |
+
+ str
+ |
+
+
+
+ Key in |
+ + required + | +
+ on
+ |
+
+ str
+ |
+
+
+
+ Key in |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
df |
+ DataFrame
+ |
+
+
+
+ Same DataFrame with normalized |
+
cuisto/utils.py
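A self-contained toy example (values are illustrative) checking that the on column sums to 1 within each by group :
import pandas as pd

import cuisto

df = pd.DataFrame(
    {
        "channel": ["marker1", "marker1", "marker2", "marker2"],
        "relative density": [0.2, 0.6, 0.1, 0.3],
    }
)
df = cuisto.utils.renormalize_per_key(df, "channel", "relative density")
print(df.groupby("channel")["relative density"].sum())  # 1.0 for each channel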
select_hemisphere_channel(df, hue, hue_filter, hue_mirror)
+
+#Select relevant data given hue and filters.
+Returns the DataFrame containing only the data to be plotted, given hue and the filters.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ df
+ |
+
+ DataFrame
+ |
+
+
+
+ DataFrame to filter. + |
+ + required + | +
+ hue
+ |
+
+ (hemisphere, channel)
+ |
+
+
+
+ hue that will be used in seaborn plots. + |
+
+ "hemisphere"
+ |
+
+ hue_filter
+ |
+
+ str
+ |
+
+
+
+ Data to select : a hemisphere name if hue is "channel", a channel name (or a list of such, or "all") if hue is "hemisphere". + |
+ + required + | +
+ hue_mirror
+ |
+
+ bool
+ |
+
+
+
+ Instead of keeping only hue_filter values, they will be plotted in mirror. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
dfplt |
+ DataFrame
+ |
+
+
+
+ DataFrame to be used in plots. + |
+
cuisto/utils.py
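Usage sketch (the column values are illustrative but follow the cuisto conventions) :
import pandas as pd

import cuisto

df = pd.DataFrame(
    {
        "hemisphere": ["Ipsi.", "Contra.", "Ipsi."],
        "channel": ["Positive", "Positive", "Negative"],
        "density mm^-2": [120.4, 65.1, 141.6],
    }
)
# keep only ipsilateral data, keeping "channel" as the plotting hue
dfplt = cuisto.utils.select_hemisphere_channel(df, "channel", "Ipsi.", False)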
This notebook shows how to load data exported from QuPath, compute metrics and display them, according to the configuration file. This is meant for a single animal.
+There are some conventions that need to be met in the QuPath project so that the measurements are usable with cuisto (see the data format requirements described later in this documentation).
You should copy this notebook, the configuration file and the atlas-related configuration files (blacklist and fusion) elsewhere and edit them according to your need.
+The data was generated from QuPath with stardist cell detection on toy data.
+import pandas as pd
+
+import cuisto
+
# Full path to your configuration file, edited according to your need beforehand
+config_file = "../../resources/demo_config_cells.toml"
+
# - Files
+# animal identifier
+animal = "animalid0"
+# set the full path to the annotations tsv file from QuPath
+annotations_file = "../../resources/cells_measurements_annotations.tsv"
+# set the full path to the detections tsv file from QuPath
+detections_file = "../../resources/cells_measurements_detections.tsv"
+
# get configuration
+cfg = cuisto.config.Config(config_file)
+
# read data
+df_annotations = pd.read_csv(annotations_file, index_col="Object ID", sep="\t")
+df_detections = pd.read_csv(detections_file, index_col="Object ID", sep="\t")
+
+# remove annotations that are not brain regions
+df_annotations = df_annotations[df_annotations["Classification"] != "Region*"]
+df_annotations = df_annotations[df_annotations["ROI"] != "Rectangle"]
+
+# convert atlas coordinates from mm to microns
+df_detections[["Atlas_X", "Atlas_Y", "Atlas_Z"]] = df_detections[
+ ["Atlas_X", "Atlas_Y", "Atlas_Z"]
+].multiply(1000)
+
+# have a look
+display(df_annotations.head())
+display(df_detections.head())
+
+ | Image | +Object type | +Name | +Classification | +Parent | +ROI | +Centroid X µm | +Centroid Y µm | +Cells: marker+ Count | +Cells: marker- Count | +ID | +Side | +Parent ID | +Num Detections | +Num Cells: marker+ | +Num Cells: marker- | +Area µm^2 | +Perimeter µm | +
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Object ID | ++ | + | + | + | + | + | + | + | + | + | + | + | + | + | + | + | + | + |
4781ed63-0d8e-422e-aead-b685fbe20eb5 | +animalid0_030.ome.tiff | +Annotation | +Root | +NaN | +Root object (Image) | +Geometry | +5372.5 | +3922.1 | +0 | +0 | +NaN | +NaN | +NaN | +2441 | +136 | +2305 | +31666431.6 | +37111.9 | +
aa4b133d-13f9-42d9-8c21-45f143b41a85 | +animalid0_030.ome.tiff | +Annotation | +root | +Right: root | +Root | +Polygon | +7094.9 | +4085.7 | +0 | +0 | +997 | +0.0 | +NaN | +1284 | +41 | +1243 | +15882755.9 | +18819.5 | +
42c3b914-91c5-4b65-a603-3f9431717d48 | +animalid0_030.ome.tiff | +Annotation | +grey | +Right: grey | +root | +Geometry | +7256.8 | +4290.6 | +0 | +0 | +8 | +0.0 | +997.0 | +1009 | +24 | +985 | +12026268.7 | +49600.3 | +
887af3eb-4061-4f8a-aa4c-fe9b81184061 | +animalid0_030.ome.tiff | +Annotation | +CB | +Right: CB | +grey | +Geometry | +7778.7 | +3679.2 | +0 | +16 | +512 | +0.0 | +8.0 | +542 | +5 | +537 | +6943579.0 | +30600.2 | +
adaabc05-36d1-4aad-91fe-2e904adc574f | +animalid0_030.ome.tiff | +Annotation | +CBN | +Right: CBN | +CB | +Geometry | +6790.5 | +3567.9 | +0 | +0 | +519 | +0.0 | +512.0 | +55 | +1 | +54 | +864212.3 | +7147.4 | +
+ | Image | +Object type | +Name | +Classification | +Parent | +ROI | +Atlas_X | +Atlas_Y | +Atlas_Z | +
---|---|---|---|---|---|---|---|---|---|
Object ID | ++ | + | + | + | + | + | + | + | + |
5ff386a8-5abd-46d1-8e0d-f5c5365457c1 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11523.0 | +4272.4 | +4276.7 | +
9a2a9a8c-acbe-4308-bc5e-f3c9fd1754c0 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11520.2 | +4278.4 | +4418.6 | +
481a519b-8b40-4450-9ec6-725181807d72 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11506.0 | +4317.2 | +4356.3 | +
fd28e09c-2c64-4750-b026-cd99e3526a57 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11528.4 | +4257.4 | +4336.4 | +
3d9ce034-f2ed-4c73-99be-f782363cf323 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11548.7 | +4203.3 | +4294.3 | +
# get distributions per regions, spatial distributions and coordinates
+df_regions, dfs_distributions, df_coordinates = cuisto.process.process_animal(
+ animal, df_annotations, df_detections, cfg, compute_distributions=True
+)
+
+# have a look
+display(df_regions.head())
+display(df_coordinates.head())
+
+ | Name | +hemisphere | +Area µm^2 | +Area mm^2 | +count | +density µm^-2 | +density mm^-2 | +coverage index | +relative count | +relative density | +channel | +animal | +
---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | +ACVII | +Left | +8307.1 | +0.008307 | +1 | +0.00012 | +120.378953 | +0.00012 | +0.002132 | +0.205275 | +Positive | +animalid0 | +
0 | +ACVII | +Left | +8307.1 | +0.008307 | +1 | +0.00012 | +120.378953 | +0.00012 | +0.000189 | +0.020671 | +Negative | +animalid0 | +
1 | +ACVII | +Right | +7061.4 | +0.007061 | +0 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +Positive | +animalid0 | +
1 | +ACVII | +Right | +7061.4 | +0.007061 | +1 | +0.000142 | +141.614977 | +0.000142 | +0.000144 | +0.021646 | +Negative | +animalid0 | +
2 | +ACVII | +both | +15368.5 | +0.015369 | +1 | +0.000065 | +65.068159 | +0.000065 | +0.001362 | +0.153797 | +Positive | +animalid0 | +
+ | Image | +Object type | +Name | +Classification | +Parent | +ROI | +Atlas_X | +Atlas_Y | +Atlas_Z | +hemisphere | +channel | +Atlas_AP | +Atlas_DV | +Atlas_ML | +animal | +
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Object ID | ++ | + | + | + | + | + | + | + | + | + | + | + | + | + | + |
5ff386a8-5abd-46d1-8e0d-f5c5365457c1 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5230 | +4.2724 | +4.2767 | +Right | +Negative | +-6.433716 | +3.098278 | +-1.4233 | +animalid0 | +
9a2a9a8c-acbe-4308-bc5e-f3c9fd1754c0 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5202 | +4.2784 | +4.4186 | +Right | +Negative | +-6.431449 | +3.104147 | +-1.2814 | +animalid0 | +
481a519b-8b40-4450-9ec6-725181807d72 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5060 | +4.3172 | +4.3563 | +Right | +Negative | +-6.420685 | +3.141780 | +-1.3437 | +animalid0 | +
fd28e09c-2c64-4750-b026-cd99e3526a57 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5284 | +4.2574 | +4.3364 | +Right | +Negative | +-6.437788 | +3.083737 | +-1.3636 | +animalid0 | +
3d9ce034-f2ed-4c73-99be-f782363cf323 | +animalid0_030.ome.tiff | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5487 | +4.2033 | +4.2943 | +Right | +Negative | +-6.453296 | +3.031224 | +-1.4057 | +animalid0 | +
# plot distributions per regions
+figs_regions = cuisto.display.plot_regions(df_regions, cfg)
+# specify which regions to plot
+# figs_regions = cuisto.display.plot_regions(df_regions, cfg, names_list=["GRN", "IRN", "MDRNv"])
+
+# save as svg
+# figs_regions[0].savefig(r"C:\Users\glegoc\Downloads\regions_count.svg")
+# figs_regions[1].savefig(r"C:\Users\glegoc\Downloads\regions_density.svg")
+
# plot 1D distributions
+fig_distrib = cuisto.display.plot_1D_distributions(
+ dfs_distributions, cfg, df_coordinates=df_coordinates
+)
+
If there were several animals in the measurement file, it would be displayed as mean +/- sem instead.
# plot heatmap (all types of cells pooled)
+fig_heatmap = cuisto.display.plot_2D_distributions(df_coordinates, cfg)
+
Draw 2D heatmaps as density isolines.
+This notebook does not actually use histoquant
and relies only on brainglobe-heatmap to extract brain structures outlines.
Only the detections measurements with atlas coordinates exported from QuPath are used.
+You need to select the range of data to be used; the region outlines will be extracted at the centroid of that range. Therefore, a range that is too large will be misleading and irrelevant.
+import brainglobe_heatmap as bgh
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import seaborn as sns
+
# path to the exported measurements from QuPath
+filename = "../../resources/cells_measurements_detections.tsv"
+
Settings
+# atlas to use
+atlas_name = "allen_mouse_10um"
+# brain regions whose outlines will be plotted
+regions = ["root", "CB", "MY", "GRN", "IRN"]
+# range to include, in Allen coordinates, in microns
+ap_lims = [9800, 10000] # lims : [0, 13200] for coronal
+ml_lims = [5600, 5800] # lims : [0, 11400] for sagittal
+dv_lims = [3900, 4100] # lims : [0, 8000] for top
+# number of isolines
+nlevels = 5
+# color mapping between classification and matplotlib color
+palette = {"Cells: marker-": "#d8782f", "Cells: marker+": "#8ccb73"}
+
df = pd.read_csv(filename, sep="\t")
+display(df.head())
+
+ | Image | +Object ID | +Object type | +Name | +Classification | +Parent | +ROI | +Atlas_X | +Atlas_Y | +Atlas_Z | +
---|---|---|---|---|---|---|---|---|---|---|
0 | +animalid0_030.ome.tiff | +5ff386a8-5abd-46d1-8e0d-f5c5365457c1 | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5230 | +4.2724 | +4.2767 | +
1 | +animalid0_030.ome.tiff | +9a2a9a8c-acbe-4308-bc5e-f3c9fd1754c0 | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5202 | +4.2784 | +4.4186 | +
2 | +animalid0_030.ome.tiff | +481a519b-8b40-4450-9ec6-725181807d72 | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5060 | +4.3172 | +4.3563 | +
3 | +animalid0_030.ome.tiff | +fd28e09c-2c64-4750-b026-cd99e3526a57 | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5284 | +4.2574 | +4.3364 | +
4 | +animalid0_030.ome.tiff | +3d9ce034-f2ed-4c73-99be-f782363cf323 | +Detection | +NaN | +Cells: marker- | +VeCB | +Polygon | +11.5487 | +4.2033 | +4.2943 | +
Here we can filter out classifications we don't want to display.
+# select objects
+# df = df[df["Classification"] == "example: classification"]
+
# get outline coordinates in coronal (=frontal) orientation
+coords_coronal = bgh.get_structures_slice_coords(
+ regions,
+ orientation="frontal",
+ atlas_name=atlas_name,
+ position=(np.mean(ap_lims), 0, 0),
+)
+# get outline coordinates in sagittal orientation
+coords_sagittal = bgh.get_structures_slice_coords(
+ regions,
+ orientation="sagittal",
+ atlas_name=atlas_name,
+ position=(0, 0, np.mean(ml_lims)),
+)
+# get outline coordinates in top (=horizontal) orientation
+coords_top = bgh.get_structures_slice_coords(
+ regions,
+ orientation="horizontal",
+ atlas_name=atlas_name,
+ position=(0, np.mean(dv_lims), 0),
+)
+
# Coronal projection
+# select objects within the rostro-caudal range
+df_coronal = df[
+ (df["Atlas_X"] >= ap_lims[0] / 1000) & (df["Atlas_X"] <= ap_lims[1] / 1000)
+]
+
+plt.figure()
+
+for struct_name, contours in coords_coronal.items():
+ for cont in contours:
+ plt.fill(cont[:, 0] / 1000, cont[:, 1] / 1000, lw=1, fc="none", ec="k")
+
+# see https://seaborn.pydata.org/generated/seaborn.kdeplot.html to customize
+ax = sns.kdeplot(
+ df_coronal,
+ x="Atlas_Z",
+ y="Atlas_Y",
+ hue="Classification",
+ levels=nlevels,
+ common_norm=False,
+ palette=palette,
+)
+ax.invert_yaxis()
+sns.despine(left=True, bottom=True)
+plt.axis("equal")
+plt.xlabel(None)
+plt.ylabel(None)
+plt.xticks([])
+plt.yticks([])
+plt.plot([2, 3], [8, 8], "k", linewidth=3)
+plt.text(2, 7.9, "1 mm")
+
Text(2, 7.9, '1 mm')+
# Sagittal projection
+# select objects within the medio-lateral range
+df_sagittal = df[
+ (df["Atlas_Z"] >= ml_lims[0] / 1000) & (df["Atlas_Z"] <= ml_lims[1] / 1000)
+]
+
+plt.figure()
+
+for struct_name, contours in coords_sagittal.items():
+ for cont in contours:
+ plt.fill(cont[:, 0] / 1000, cont[:, 1] / 1000, lw=1, fc="none", ec="k")
+
+# see https://seaborn.pydata.org/generated/seaborn.kdeplot.html to customize
+ax = sns.kdeplot(
+ df_sagittal,
+ x="Atlas_X",
+ y="Atlas_Y",
+ hue="Classification",
+ levels=nlevels,
+ common_norm=False,
+ palette=palette,
+)
+ax.invert_yaxis()
+sns.despine(left=True, bottom=True)
+plt.axis("equal")
+plt.xlabel(None)
+plt.ylabel(None)
+plt.xticks([])
+plt.yticks([])
+plt.plot([2, 3], [7.1, 7.1], "k", linewidth=3)
+plt.text(2, 7, "1 mm")
+
Text(2, 7, '1 mm')+
# Top projection
+# select objects within the dorso-ventral range
+df_top = df[(df["Atlas_Y"] >= dv_lims[0] / 1000) & (df["Atlas_Y"] <= dv_lims[1] / 1000)]
+
+plt.figure()
+
+for struct_name, contours in coords_top.items():
+ for cont in contours:
+ plt.fill(-cont[:, 0] / 1000, cont[:, 1] / 1000, lw=1, fc="none", ec="k")
+
+# see https://seaborn.pydata.org/generated/seaborn.kdeplot.html to customize
+ax = sns.kdeplot(
+ df_top,
+ x="Atlas_Z",
+ y="Atlas_X",
+ hue="Classification",
+ levels=nlevels,
+ common_norm=False,
+ palette=palette,
+)
+ax.invert_yaxis()
+sns.despine(left=True, bottom=True)
+plt.axis("equal")
+plt.xlabel(None)
+plt.ylabel(None)
+plt.xticks([])
+plt.yticks([])
+plt.plot([0.5, 1.5], [0.5, 0.5], "k", linewidth=3)
+plt.text(0.5, 0.4, "1 mm")
+
Text(0.5, 0.4, '1 mm')+
+
Plot regions coverage percentage in the spinal cord.
+This showcases that any BrainGlobe atlas should be supported.
+Here we're going to quantify the percentage of area of each spinal cord region innervated by axons.
+The "area µm^2" measurement for each annotation can be created in QuPath with a pixel classifier, using the Measure button.
+We're going to consider that the "area µm^2" measurement generated by the pixel classifier is an object count.
+histoquant computes a density, which is the count in each region divided by its area.
+Therefore, in this case, it will actually be the fraction of area covered by fibers in a given color.
The data was generated using QuPath with a pixel classifier on toy data.
+import pandas as pd
+
+import cuisto
+
# Full path to your configuration file, edited according to your need beforehand
+config_file = "../../resources/demo_config_fibers.toml"
+
# - Files
+# not important if only one animal
+animal = "animalid1-SC"
+# set the full path to the annotations tsv file from QuPath
+annotations_file = "../../resources/fibers_measurements_annotations.tsv"
+
# get configuration
+cfg = cuisto.config.Config(config_file)
+
# read data
+df_annotations = pd.read_csv(annotations_file, index_col="Object ID", sep="\t")
+df_detections = pd.DataFrame() # empty DataFrame
+
+# remove annotations that are not brain regions
+df_annotations = df_annotations[df_annotations["Classification"] != "Region*"]
+df_annotations = df_annotations[df_annotations["ROI"] != "Rectangle"]
+
+# have a look
+display(df_annotations.head())
+
+ | Image | +Object type | +Name | +Classification | +Parent | +ROI | +Centroid X µm | +Centroid Y µm | +Fibers: EGFP area µm^2 | +Fibers: DsRed area µm^2 | +ID | +Side | +Parent ID | +Area µm^2 | +Perimeter µm | +
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Object ID | ++ | + | + | + | + | + | + | + | + | + | + | + | + | + | + |
dcfe5196-4e8d-4126-b255-a9ea393c383a | +animalid1-SC_s1.ome.tiff | +Annotation | +Root | +NaN | +Root object (Image) | +Geometry | +1353.70 | +1060.00 | +108993.1953 | +15533.3701 | +NaN | +NaN | +NaN | +3172474.0 | +9853.3 | +
acc74bc0-3dd0-4b3e-86e3-e6c7b681d544 | +animalid1-SC_s1.ome.tiff | +Annotation | +root | +Right: root | +Root | +Polygon | +864.44 | +989.95 | +39162.8906 | +5093.2798 | +250.0 | +0.0 | +NaN | +1603335.7 | +4844.2 | +
94571cf9-f22b-453f-860c-eb13d0e72440 | +animalid1-SC_s1.ome.tiff | +Annotation | +WM | +Right: WM | +root | +Geometry | +791.00 | +1094.60 | +20189.0469 | +2582.4824 | +130.0 | +0.0 | +250.0 | +884002.0 | +7927.8 | +
473d65fb-fda4-4721-ba6f-cc659efc1d5a | +animalid1-SC_s1.ome.tiff | +Annotation | +vf | +Right: vf | +WM | +Polygon | +984.31 | +1599.00 | +6298.3574 | +940.4100 | +70.0 | +0.0 | +130.0 | +281816.9 | +2719.5 | +
449e2cd1-eca2-4708-83fe-651f378c3a14 | +animalid1-SC_s1.ome.tiff | +Annotation | +df | +Right: df | +WM | +Polygon | +1242.90 | +401.26 | +1545.0750 | +241.3800 | +74.0 | +0.0 | +130.0 | +152952.8 | +1694.4 | +
# get distributions per regions, spatial distributions and coordinates
+df_regions, dfs_distributions, df_coordinates = cuisto.process.process_animal(
+ animal, df_annotations, df_detections, cfg, compute_distributions=False
+)
+
+# convert the "density µm^-2" column, which is actually the coverage fraction, to a percentage
+df_regions["density µm^-2"] = df_regions["density µm^-2"] * 100
+
+# have a look
+display(df_regions.head())
+
+ | Name | +hemisphere | +Area µm^2 | +Area mm^2 | +area µm^2 | +area mm^2 | +density µm^-2 | +density mm^-2 | +coverage index | +relative count | +relative density | +channel | +animal | +
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | +10Sp | +Contra. | +1749462.18 | +1.749462 | +53117.3701 | +53.11737 | +3.036211 | +30362.113973 | +1612.755645 | +0.036535 | +0.033062 | +Negative | +animalid1-SC | +
0 | +10Sp | +Contra. | +1749462.18 | +1.749462 | +5257.1025 | +5.257103 | +0.300498 | +3004.98208 | +15.797499 | +0.030766 | +0.02085 | +Positive | +animalid1-SC | +
1 | +10Sp | +Ipsi. | +1439105.93 | +1.439106 | +64182.9823 | +64.182982 | +4.459921 | +44599.206328 | +2862.51007 | +0.023524 | +0.023265 | +Negative | +animalid1-SC | +
1 | +10Sp | +Ipsi. | +1439105.93 | +1.439106 | +8046.3375 | +8.046337 | +0.559121 | +5591.205854 | +44.988729 | +0.028911 | +0.022984 | +Positive | +animalid1-SC | +
2 | +10Sp | +both | +3188568.11 | +3.188568 | +117300.3524 | +117.300352 | +3.678778 | +36787.783216 | +4315.219935 | +0.028047 | +0.025734 | +Negative | +animalid1-SC | +
# plot distributions per regions
+fig_regions = cuisto.display.plot_regions(df_regions, cfg)
+# specify which regions to plot
+# fig_regions = hq.display.plot_regions(df_regions, cfg, names_list=["Rh9", "Sr9", "8Sp"])
+
+# save as svg
+# fig_regions[0].savefig(r"C:\Users\glegoc\Downloads\nice_figure.svg")
+
This example uses synthetic data to showcase how histoquant
can be used in a pipeline.
Annotations measurements should be exported from QuPath, following the required directory structure.
+Alternatively, you can merge all your CSV files yourself, one per animal, adding an animal ID to each table. Those can be processed with the histoquant.process.process_animal() function in a loop, collecting the results at each iteration and concatenating them at the end. The result can then be used with the display module. See the API reference for the process module.
import cuisto
+
# Full path to your configuration file, edited according to your need beforehand
+config_file = "../../resources/demo_config_multi.toml"
+
# Files
+wdir = "../../resources/multi"
+animals = ["mouse0", "mouse1"]
+
# get configuration
+cfg = cuisto.Config(config_file)
+
# get distributions per regions
+df_regions, _, _ = cuisto.process.process_animals(
+ wdir, animals, cfg, compute_distributions=False
+)
+
+# have a look
+display(df_regions.head(10))
+
Processing mouse1: 100%|██████████| 2/2 [00:00<00:00, 15.66it/s] ++
+ | Name | +hemisphere | +Area µm^2 | +Area mm^2 | +length µm | +length mm | +density µm^-1 | +density mm^-1 | +coverage index | +relative count | +relative density | +channel | +animal | +
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | +ACVII | +Contra. | +9099.04 | +0.009099 | +468.0381 | +0.468038 | +0.051438 | +51438.184688 | +24.07503 | +0.00064 | +0.022168 | +marker3 | +mouse0 | +
1 | +ACVII | +Contra. | +9099.04 | +0.009099 | +4260.4844 | +4.260484 | +0.468234 | +468234.495068 | +1994.905762 | +0.0019 | +0.056502 | +marker2 | +mouse0 | +
2 | +ACVII | +Contra. | +9099.04 | +0.009099 | +5337.7103 | +5.33771 | +0.586623 | +586623.45698 | +3131.226069 | +0.010104 | +0.242734 | +marker1 | +mouse0 | +
3 | +ACVII | +Ipsi. | +4609.90 | +0.004610 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +marker3 | +mouse0 | +
4 | +ACVII | +Ipsi. | +4609.90 | +0.004610 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +marker2 | +mouse0 | +
5 | +ACVII | +Ipsi. | +4609.90 | +0.004610 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +0.0 | +marker1 | +mouse0 | +
6 | +ACVII | +both | +13708.94 | +0.013709 | +468.0381 | +0.468038 | +0.034141 | +34141.086036 | +15.979329 | +0.000284 | +0.011001 | +marker3 | +mouse0 | +
7 | +ACVII | +both | +13708.94 | +0.013709 | +4260.4844 | +4.260484 | +0.310781 | +310781.460857 | +1324.079566 | +0.000934 | +0.030688 | +marker2 | +mouse0 | +
8 | +ACVII | +both | +13708.94 | +0.013709 | +5337.7103 | +5.33771 | +0.38936 | +389359.811918 | +2078.289878 | +0.00534 | +0.142623 | +marker1 | +mouse0 | +
9 | +AMB | +Contra. | +122463.80 | +0.122464 | +30482.7815 | +30.482782 | +0.248913 | +248912.588863 | +7587.548059 | +0.041712 | +0.107271 | +marker3 | +mouse0 | +
figs_regions = cuisto.display.plot_regions(df_regions, cfg)
+
This page will guide you to use the pyramid-creator
package, in the event the CZI file does not work directly in QuPath. The script will generate pyramids from OME-TIFF files exported from ZEN.
Tip
+pyramid-creator
can also pyramidalize images using Python only with the --no-use-qupath
option.
This Python script uses QuPath under the hood, via a companion script called createPyramids.groovy
. It will find the OME-TIFF files and make QuPath run the groovy script on them, in console mode (without graphical user interface).
This script is standalone, i.e. it does not rely on the cuisto package. But installing the latter makes sure all dependencies are installed (namely typer and tqdm with the QuPath backend, and quite a few more for the Python backend).
pyramid-creator
moved to a standalone package that you can find here with installation and usage instructions.
You will find instructions on the dedicated project page over at Github.
+For reference :
+You will need conda
, follow those instructions to install it.
Then, create a virtual environment if you didn't already (pyramid-creator
can be installed in the environment for cuisto
) and install the pyramid-creator
package.
+
conda create -c conda-forge -n cuisto-env python=3.12 # not required if you already created an environment
+conda activate cuisto-env
+pip install pyramid-creator
+
To use the Python backend (relying on tifffile), replace the last line with :
+
+To use the QuPath backend, a working QuPath installation is required, and the pyramid-creator
command needs to be aware of its location.
+To do so, first, install QuPath. By default, it will install in ~\AppData\QuPath-0.X.Y
. In any case, note down the installation location.
Then, you have several options :
+- Create a file in your user directory called "QUPATH_PATH" (without extension), containing the full path to the QuPath console executable. In my case, it reads : C:\Users\glegoc\AppData\Local\QuPath-0.5.1\QuPath-0.5.1 (console).exe
. Then, the pyramid-creator
script will read this file to find the QuPath executable.
+- Specify the QuPath path as an option when calling the command line interface (see the Usage section) :
+
pyramid-creator /path/to/your/images --qupath-path "C:\Users\glegoc\AppData\Local\QuPath-0.5.1\QuPath-0.5.1 (console).exe"
+
from pyramid_creator import pyramidalize_directory
+pyramidalize_directory("/path/to/your/images/", qupath_path=r"C:\Users\glegoc\AppData\Local\QuPath-0.5.1\QuPath-0.5.1 (console).exe")  # raw string so backslashes are not treated as escape characters
+
+- Do nothing : pyramid-creator should then try to find the QuPath executable automatically and write it down in the "QUPATH_PATH" file by itself.
+OME-TIFF is a specification of the TIFF image format. It specifies how the metadata should be written to the file to be interoperable between software. ZEN can export to OME-TIFF so you don't need to pay attention to metadata. Therefore, you won't need to specify pixel size and channel names and colors as they will be read directly from the OME-TIFF files.
+The OME-TIFF files should be ready to be pyramidalized with the create_pyramids.py
script.
See the instructions on the dedicated project page over at Github.
+ + + + + + + + + + + + + + + + + + + + + + + +You can head to the ABBA documentation for installation instructions. You'll see that a Windows installer is available. While it might be working great, I prefer to do it manually step-by-step to make sure everything is going well.
+You will find below installation instructions for the regular ABBA Fiji plugin, which proposes only the mouse and rat brain atlases. To be able to use the Brainglobe atlases, you will need the Python version. The two can be installed alongside each other.
+Install the "batteries-included" distribution of ImageJ, Fiji, from the official website.
+Warning
+Extract Fiji somewhere you have write access, otherwise Fiji will not be able to download and install plugins. In other words, put the folder in your User directory and not in C:\, C:\Program Files and the like.
+We need to add the PTBIOP update site, managed by the bio-imaging and optics facility at EPFL, that contains the ABBA plugin.
+In Help > Update... Manage Update Sites, look up PTBIOP and click on the check box. Apply and Close, and Apply Changes.
+This will download and install the required plugins. Restart ImageJ as suggested. Then, launch Plugins > BIOP > Atlas > ABBA - ABBA start, or simply type abba start in the search box.
ABBA can leverage the elastix toolbox for automatic 2D in-plane registration.
+ABBA should be installed and functional ! You can check the official documentation for usage instructions and some tips here.
+Brainglobe is an initiative aiming at providing interoperable, model-agnostic Python-based tools for neuroanatomy. They package various published volumetric anatomical atlases of different species (check the list), including the Allen Mouse brain atlas (CCFv3, ref.) and a 3D version of the Allen mouse spinal cord atlas (ref).
+To be able to leverage those atlases, we need to make ImageJ and Python be able to talk to each other. This is the purpose of abba_python, that will install ImageJ and its ABBA plugins inside a python environment, with bindings between the two worlds.
+conda
#If not done already, follow those instructions to install conda
.
Tip
+Afterwards, to launch ImageJ from Python and do some registration work, you just need to launch a terminal (PowerShell), and do steps 4., 6., and 7.
+You can follow the same instructions as the regular Fiji version. You can do it from either the "normal" Fiji or the ImageJ instance launched from Python, they share the same configuration files. Therefore, if you already did it in regular Fiji, elastix should already be set up and ready to use in ImageJ from Python.
+Unfortunately on some computers, Python does not find the Java virtual machine even though it should have been installed when installing OpenJDK with conda. This will result in an error mentioning "java.dll" and suggesting to check the JAVA_HOME
environment variable.
The only fix I could find is to install Java system-wide. You can grab a (free) installer on Adoptium, choosing JRE 17.X for your platform.
+During the installation :
Restart the terminal and try again. Now, ImageJ should use the system-wide Java and it should work.
+To import registered regions in your QuPath project and be able to convert objects' coordinates in atlas space, the ABBA QuPath extension is required.
In Edit > Preferences, in the Extension tab, set your QuPath user directory to a local directory (usually C:\Users\USERNAME\QuPath\v0.X.Y). Create a folder named extensions in your QuPath user directory. Download the ABBA extension (qupath-extension-abba-x.y.z.zip) and decompress it inside the extensions folder in your QuPath user directory. Restart QuPath : in Extensions, you should have an ABBA entry.
functionalities as you see fit, there exists a pipeline version of those. It requires a specific structure to store files (so that the different scripts know where to look for data). It also requires that you have detections stored as geojson files, which can be achieved using a pixel classifier and further segmentation (see here) for example.
This is especially useful to perform quantification for several animals at once, where you'll only need to specify the root directory and the animals identifiers that should be pooled together, instead of having to manually specify each detections and annotations files.
Three main scripts and functions are used within the pipeline :
+exportPixelClassifierProbabilities.groovy
to create prediction maps of objects of interestsegment_image.py
to segment those maps and create geojson files to be imported back to QuPath as detectionspipelineImportExport.groovy
to :$folderPrefix$segmentation/$segTag$/geojson
)Following a specific directory structure ensures subsequent scripts and functions can find required files. The good news is that this structure will mostly be created automatically using the segmentation scripts (from QuPath and Python), as long as you stay consistent filling the parameters of each script.
+The structure expected by the groovy all-in-one script and cuisto
batch-process function is the following :
some_directory/
+ ├──AnimalID0/
+ │ ├── animalid0_qupath/
+ │ └── animalid0_segmentation/
+ │ └── segtag/
+ │ ├── annotations/
+ │ ├── detections/
+ │ ├── geojson/
+ │ └── probabilities/
+ ├──AnimalID1/
+ │ ├── animalid1_qupath/
+ │ └── animalid1_segmentation/
+ │ └── segtag/
+ │ ├── annotations/
+ │ ├── detections/
+ │ ├── geojson/
+ │ └── probabilities/
+
Info
+Except the root directory and the QuPath project, the rest is automatically created based on the parameters provided in the different scripts. Here's the description of the structure and the requirements :
+animalid0
should be a convenient animal identifier.AnimalID0
, can be anything but should correspond to one and only one animal.animalid0
should be lower case.animalid0_qupath
can be named as you wish in practice, but should be the QuPath project.animalid0_segmentation
should be called exactly like this -- replacing animalid0
with the actual animal ID. It will be created automatically with the exportPixelClassifierProbabilities.groovy
script.segtag
corresponds to the type of segmentation (cells, fibers...). It is specified in the exportPixelClassifierProbabilities
script. It could be anything, but to recognize if the objects are polygons (and should be counted per regions) or polylines (and the cumulated length should be measured), there are some hardcoded keywords in the segment_images.py
and pipelineImportExport.groovy
scripts :cells
, cell
, polygons
, polygon
synapto
, synaptophysin
, syngfp
, boutons
, points
fibers
, fiber
, axons
, axon
annotations
contains the atlas regions measurements as TSV files.detections
contains the objects atlas coordinates and measurements as CSV files (for punctal objects) or JSON (for polylines objects).geojson
contains objects stored as geojson files. They could be generated with the pixel classifier prediction map segmentation.probabilities
contains the prediction maps to be segmented by the segment_images.py
script.Tip
+You can see an example minimal directory structure with only annotations stored in resources/multi
.
Tip
+Remember that this is merely an example pipeline, you can shortcut it at any points, as long as you end up with TSV files following the requirements for cuisto
.
exportPixelClassifierProbabilities.groovy
script. You need to get a pixel classifier or create one.segment_images.py
script to generate the geojson files containing the objects of interest.pipelineImportExport.groovy
script on your QuPath project.Tip
+You can see a live example in this demo notebook.
+cuisto
uses some QuPath classifications concepts, make sure to be familiar with them with the official documentation. Notably, we use the concept of primary classification and derived classification : an object classified as First: second
is of classification First
and of derived classification second
.
cuisto
assumes a specific way of storing regions and objects information in the TSV files exported from QuPath. Note that only one primary classification is supported, but you can have any number of derived classifications.
Detections are the objects of interest. Their information must respect the following :
+Atlas_X
, Atlas_Y
, Atlas_Z
. They correspond, respectively, to the anterio-posterior (rostro-caudal) axis, the inferio-superior (dorso-ventral) axis and the left-right (medio-lateral) axis.Primary: second
. Primary would be an object type (cells, fibers, ...), the second one would be a biological marker or a detection channel (fluorescence channel name), for instance : Cells: some marker
, or Fibers: EGFP
.Annotations correspond to the atlas regions. Their information must respect the following :
+Hemisphere: acronym
(for ex. Left: PAG
).Primary classification: derived classification measurement name
.Cells: some marker Count
.Fibers: EGFP Length µm
.cuisto
#While you're free to add any measurements as long as they follow the requirements, keep in mind that for atlas regions quantification, cuisto
will only compute, pool and average the following metrics :
It is then up to you to select which metrics among those to compute and display and name them, via the configuration file.
+For punctal detections (eg. objects whose only the centroid is considered), only the atlas coordinates are used, to compute and display spatial distributions of objects across the brain (using their classifications to give each distributions different hues).
+For fibers-like objects, it requires to export the lines detections atlas coordinates as JSON files, with the exportFibersAtlasCoordinates.groovy
script (this is done automatically when using the pipeline).
The groovy script under scripts/qupath-utils/measurements/addRegionsCount.groovy
will add a properly formatted count of objects of selected classifications in all atlas regions. This is used for punctual objects (polygons or points), for example objects created in QuPath or with the segmentation script.
The groovy script under scripts/qupath-utils/measurements/addRegionsLength.groovy
will add the properly formatted cumulated lenghth in microns of fibers-like objects in all atlas regions. This is used for polylines objects, for example generated with the segmentation script.
Keeping in mind cuisto
limitations, you can add any measurements you'd like.
For example, you can run a pixel classifier in all annotations (eg. atlas regions). Using the Measure
button, it will add a measurement of the area covered by classified pixels. Then, you can use the script located under scripts/qupath-utils/measurements/renameMeasurements.groovy
to rename the generated measurements with a properly-formatted name. Finally, you can export regions measurements.
Since cuisto
will compute a "density", eg. the measurement divided by the region area, in this case, it will correspond to the fraction of surface occupied by classified pixels. This is showcased in the Examples.
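As a sketch of the arithmetic involved (hypothetical numbers) :
# Hypothetical values for a single region.
classified_area_um2 = 12_500.0  # measurement : area covered by classified pixels
region_area_um2 = 250_000.0     # "Area µm^2" of the region

# The "density" is the measurement divided by the region area ; with an area
# measurement, it is dimensionless : the fraction of surface occupied by signal.
fraction = classified_area_um2 / region_area_um2
print(f"{fraction:.1%}")  # 5.0%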
Once you imported atlas regions registered with ABBA, detected objects in your images and added properly formatted measurements to detections and annotations, you can :
+Head to Measure > Export measurements.
Choose an Output file (specify in the file name if it is a detections or annotations file).
Choose Detections or Annotations in Export type.
Click Export.
Do this for both Detections and Annotations; you can then use those files with cuisto
(see the Examples).
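As an illustration, a minimal cuisto session using such exports could look like the sketch below (the paths, file names and animal ID are hypothetical ; the Config class and get_regions_metrics function are documented in the API reference) :
import pandas as pd
from cuisto.config import Config
from cuisto.compute import get_regions_metrics

# Hypothetical paths -- adapt to your project.
cfg = Config("configs/my_config.toml")
df_annotations = pd.read_csv("exports/animalid0_annotations.tsv", sep="\t")

# Quantify the base measurement per brain region.
df_regions = get_regions_metrics(
    df_annotations,
    cfg.object_type,                  # eg. "Cells"
    cfg.channels["names"],            # classification name -> display name
    cfg.regions["base_measurement"],  # eg. "Count" or "Length µm"
    cfg.regions["metrics"],           # internal metric keys -> display names
)
print(df_regions.head())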
The QuPath documentation is quite extensive, detailed, very well explained and contains full guides on how to create a QuPath project and how to find objects of interest. It is therefore a highly recommended read; nevertheless, you will find below some quick reminders.
+QuPath works with projects. It is basically a folder with a main project.qproj
 file, which is a JSON file that contains all the data about your images except the images themselves. Alongside, there is a data
folder with an entry for each image, that stores the thumbnails, metadata about the image and detections and annotations but, again, not the image itself. The actual images can be stored anywhere (including a remote server), the QuPath project merely contains the information needed to fetch them and display them. QuPath will never modify your image data.
This design makes the QuPath project itself lightweight (should never exceed 500MB even with millions of detections), and portable : upon opening, if QuPath is not able to find the images where they should be, it will ask for their new locations.
+Tip
+It is recommended to create the QuPath project locally on your computer, to avoid any risk of conflicts if two people open it at the same time. Nevertheless, you should backup the project regularly on a remote server.
+To create a new project, simply drag & drop an empty folder into QuPath window and accept to create a new empty project. Then, add images :
+Add images
, then Choose files
on the bottom. Drag & drop does not really work as the images will not be sorted properly.Then, choose the following options :
+Image server
Default (let QuPath decide)
+Set image type
Most likely, fluorescence
+Rotate image
No rotation (unless all your images should be rotated)
+Optional args
Leave empty
+Auto-generate pyramids
Uncheck
+Import objects
Uncheck
+Show image selector
Might be useful to check if the images are read correctly (mostly for CZI files).
+QuPath has a built-in cell detection feature, available in Analyze > Cell detection
. You have a full tutorial in the official documentation.
Briefly, this uses a watershed algorithm to find bright spots and can perform a cell expansion to estimate the full cell shape based on the detected nuclei. Therefore, this works best to segment nuclei but one can expect good performance for cells as well, depending on the imaging and staining conditions.
+Tip
+In scripts/qupath-utils/segmentation
, there is watershedDetectionFilters.groovy
 which uses this feature from a script. It further allows you to filter out detected cells based on shape measurements, as well as fluorescence intensity in several channels and cell compartments.
Another very powerful and versatile way to segment cells is through machine learning. Note the term "machine" and not "deep", as it relies on statistical theory from the 1980s. QuPath provides a user-friendly interface for it, similar to what ilastik provides.
+The general idea is to train a model to classify every pixel as signal or as background. You can find good resources on how to proceed in the official documentation and some additional tips and tutorials on Michael Nelson's blog (here and here).
+Specifically, you will manually annotate some pixels of objects of interest and background. Then, you will apply some image processing filters (gaussian blur, laplacian...) to reveal specific features in your images (shapes, textures...). Finally, the pixel classifier will fit a model on those pixel values, so that it will be able to predict whether a pixel, given its values under the different filters you applied, belongs to an object of interest or to the background.
+This is done in an intuitive GUI with live predictions, to get instant feedback on the effects of the filters and manual annotations.
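To demystify what happens under the hood, below is a minimal, self-contained sketch of the same idea using scikit-learn and scikit-image. It illustrates the principle only -- it is not QuPath's actual implementation, and the "annotations" are faked :
import numpy as np
from skimage import filters
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)
image = rng.random((128, 128))  # stand-in for one fluorescence channel

# Filters reveal features : raw intensity, smoothed intensity, edges.
features = np.stack(
    [
        image,
        filters.gaussian(image, sigma=2),
        filters.laplace(filters.gaussian(image, sigma=2)),
    ],
    axis=-1,
).reshape(-1, 3)

# A handful of "annotated" pixels : 1 = object, 0 = background (faked here).
annotated = rng.choice(features.shape[0], size=200, replace=False)
labels = (features[annotated, 1] > 0.5).astype(int)

# Fit a model on the annotated pixels, then predict every pixel of the image.
classifier = RandomForestClassifier(n_estimators=50).fit(features[annotated], labels)
prediction = classifier.predict(features).reshape(image.shape)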
+First and foremost, you should use a QuPath project dedicated to the training of a pixel classifier, as it is the only way to be able to edit it later on.
+Head to Classify > Pixel classification > Train pixel classifier, and turn on Live prediction.
Load your training images with Load training.
In Advanced settings, check Reweight samples to help make sure a classification is not over-represented.
Classifier : typically, RTrees or ANN_MLP. This can be changed dynamically afterwards to see which works best for you.
Resolution : this is the pixel size used. This is a trade-off between accuracy and speed. If your objects are only composed of a few pixels, you'll need the full resolution; for big objects, reducing the resolution will be faster.
Features : this is the core of the process -- where you choose the filters. In Edit, you'll need to choose the channels to use, the filters to apply and the scales at which to apply them.
Output :
Classification : QuPath will directly classify the pixels. Use that to create objects directly from the pixel classifier within QuPath.
Probability : this will output an image where each pixel is its probability to belong to each of the classifications. This is useful to create objects externally.
Check Show classification once you begin to make annotations.
Begin to annotate ! Use the Polyline annotation tool (V) to classify some pixels belonging to an object and some pixels belonging to the background across your images.
+Tip
+You can select the RTrees
Classifier, then Edit
: check the Calculate variable importance
 checkbox. Then in the log (Ctrl+Shift+L), you can inspect the weight each feature has. This can help discard some filters, to keep only the ones most efficient at distinguishing the objects of interest.
See the effect of your annotations on the classification live using C, and continue until you're satisfied.
+Important
+This is machine learning : the fewer annotations, the better, as this will make your model more general and able to adapt to new images. The goal is to find the minimal number of annotations that makes it work.
+Once you're done, give your classifier a name in the text box at the bottom and save it. It will be stored as a JSON file in the classifiers
folder of the QuPath project. This file can be imported in your other QuPath projects.
Once you imported your model JSON file (Classify > Pixel classification > Load pixel classifier
, three-dotted menu and Import from file
), you can create objects out of it, measure the surface occupied by classified pixels in each annotation or classify existing detections based on the prediction at their centroid.
In scripts/qupath-utils/segmentation
, there is a createDetectionsFromPixelClassifier.groovy
script to batch-process your project.
Alternatively, a Python script provided with cuisto
can be used to segment the probability map generated by the pixel classifier (the script is located in scripts/segmentation
).
You will first need to export those with the exportPixelClassifierProbabilities.groovy
script (located in scripts/qupath-utils
).
Then the segmentation script can create objects from those probability maps : polygons or points for punctual objects, and polylines for fibers-like objects.
+Several parameters have to be specified by the user, see the segmentation script API reference. This script will generate GeoJson files that can be imported back to QuPath with the importGeojsonFiles.groovy
script.
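For intuition, the core of such a probability-map segmentation can be sketched in a few lines (simplified : the actual script handles calibration, size filters and the different object types ; file names are hypothetical) :
import geojson
import numpy as np
from skimage import io, measure

# Probability map exported by exportPixelClassifierProbabilities.groovy.
proba = io.imread("exports/image01_Probabilities.tiff").astype(float)
proba /= proba.max()  # normalize to [0, 1]

mask = (proba > 0.5).astype(float)  # threshold the probabilities

features = []
for contour in measure.find_contours(mask, 0.5):
    # find_contours returns (row, col) ; GeoJSON expects (x, y).
    coords = [(float(c), float(r)) for r, c in contour]
    if coords[0] != coords[-1]:
        coords.append(coords[0])  # close the ring
    features.append(geojson.Feature(geometry=geojson.Polygon([coords])))

with open("exports/image01_detections.geojson", "w") as fid:
    geojson.dump(geojson.FeatureCollection(features), fid)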
QuPath being open-source and extensible, there are third-party extensions that implement popular deep learning segmentation algorithms directly in QuPath. They can be used to find objects of interest as detections in the QuPath project and thus integrate nicely with cuisto
to quantify them afterwards.
QuPath extension : https://github.com/qupath/qupath-extension-instanseg
+Original repository : https://github.com/instanseg/instanseg
+Reference papers : doi:10.48550/arXiv.2408.15954, doi:10.1101/2024.09.04.611150
QuPath extension : https://github.com/qupath/qupath-extension-stardist
+Original repository : https://github.com/stardist/stardist
+Reference paper : doi:10.48550/arXiv.1806.03535
There is a stardistDetectionFilter.groovy
script in scripts/qupath-utils/segmentation
 to use it from a script, which further allows you to filter out detected cells based on shape measurements, as well as fluorescence intensity in several channels and cell compartments.
QuPath extension : https://github.com/BIOP/qupath-extension-cellpose
+Original repository : https://github.com/MouseLand/cellpose
+Reference papers : doi:10.1038/s41592-020-01018-x, doi:10.1038/s41592-022-01663-4, doi:10.1101/2024.02.10.579780
There is a cellposeDetectionFilter.groovy
script in scripts/qupath-utils/segmentation
 to use it from a script, which further allows you to filter out detected cells based on shape measurements, as well as fluorescence intensity in several channels and cell compartments.
QuPath extension : https://github.com/ksugar/qupath-extension-sam
+Original repositories : samapi, SAM
+Reference papers : doi:10.1101/2023.06.13.544786, doi:10.48550/arXiv.2304.02643
This is more an interactive annotation tool than a fully automatic segmentation algorithm.
+ + + + + + + + + + + + + + + + + + + + + + + +The ABBA documentation is quite extensive and contains guided tutorials and a video tutorial. You should therefore check it out ! Nevertheless, you will find below some quick reminders.
+Always use ABBA with a QuPath project, if you import the images directly it will not be possible to export the results back to QuPath. In the toolbar, head to Import > Import QuPath Project
.
Warning
+ABBA is not the most stable software, it is highly recommended to save in a different file each time you do anything.
+In the right panel, there is everything related to the images, both yours and the atlas.
+In the Atlas Display
section, you can turn on and off different channels (the first is the reference image, the last is the regions outlines).
+The Displayed slicing [atlas steps]
slider can increase or decrease the number of displayed 2D slices extracted from the 3D volume. It is comfortable to set to to the same spacing as your slices. Remember it is in "altas steps", so for an atlas imaged at 10µm, a 120µm spacing corresponds to 12 atlas steps.
The Slices Display
section lists all your slices. Ctrl+A to select all, and click on the Vis.
header to make them visible. Then, you can turn on and off each channels (generally the NISSL channel and the ChAT channel will be used) by clicking on the corresponding header. Finally, set the display limits clicking on the empty header containing the colors.
Right Button in the main view to Change overlap mode
twice to get the slices right under the atlas slices.
Tip
+Every action in ABBA are stored and are cancellable with Right Button+Z, except the Interactive transform.
+This is the hardest task. You need to drag the slices along the rostro-caudal axis and modify the virtual slicing angle (X Rotation [deg]
and Y Rotation [deg]
sliders at the bottom of the right panel) until you match the brain structures observed in both your images and the atlas.
Tip
+With a high number of slices, most likely, it will be impossible to find a position and slicing angle that works for all your slices. In that case, you should procede in batch, eg. sub-stack of images with a unique position and slicing angle that works for all images in the sub-stack. Then, remove the remaining slices (select them, Right Button > Remove Selected Slices
), but do not remove them from the QuPath project.
Procede as usual, including saving (note the slices range it corresponds to) and exporting the registration back to QuPath. Then, reimport the project in a fresh ABBA instance, remove the slices that were already registered and redo the whole process with the next sub-stack and so on.
+Once you found the correct position and slicing angle, it must not change anymore, otherwise the registration operations you perform will not make any sense anymore.
+The next step is to deform your slices to match the corresponding atlas image, extracted from the 3D volume given the position and virtual slicing angle defined at the previous step.
+Info
+ABBA makes the choice to deform your slices to the atlas, but the transformations are invertible. This means that you will still be able to work on your raw data and deform the altas onto it instead.
+In image processing, there are two kinds of deformation one can apply on an image :
+Both can be applied manually or automatically (if the imaging quality allows it). +You have different tools to achieve this, all of which can be combined in any order, except the Interactive transform tool (coarse, linear manual deformation).
+Change the overlap mode (Right Button) to overlay the slice onto the atlas regions borders. Select the slice you want to align.
+While not mandatory, if this tool shall be used, it must be before any operation as it is not cancellable.
+Head to Register > Affine > Interactive transform
.
+This will open a box where you can rotate, translate and resize the image to make a first, coarse alignment.
Close the box. Again, this is not cancellable. Afterwards, you're free to apply any numbers of transformations in any order.
+This uses the elastix toolbox to compute the transformations needed to best match two images. It is available in both affine and spline mode, in the Register > Affine
and Register > Spline
menus respectively.
In both cases, it will open a dialog where you need to choose :
+For the Spline mode, there an additional parameter :
+This uses BigWarp to manually deform the images with the mouse. It can be done from scratch (eg. you place the points yourself) or from a previous registration (either a previous BigWarp session or elastix in Spline mode).
+Register > Spline > BigWarp registration
to launch the tool. Choose the atlas that allows you to best see the brain structures (usually the regions outlines channels, the last one), and the reference fluorescence channel.
It will open two viewers, called "BigWarp moving image" and "BigWarp fixed image". Briefly, they correspond to the two spaces you're working in, the "Atlas space" and the "Slice space".
+Tip
+Do not panick yet, while the explanations might be confusing (at least they were to me), in practice, it is easy, intuitive and can even be fun (sometimes, at small dose).
+To browse the viewer, use Right Button + drag (Left Button is used to rotate the viewer), Middle Button zooms in and out.
+The idea is to place points, called landmarks, that always go in pairs : one in the moving image and one where it corresponds to in the fixed image (or vice-versa). In practice, we will only work in the BigWarp fixed image viewer to place landmarks in both space in one click, then drag it to the corresponding location, with a live feedback of the transformation needed to go from one to another.
+To do so :
+Press Space to switch to the "Landmark mode".
+Warning
+In "Landmark mode", Right Button can't be used to browse the view anymore. To do so, turn off the "Landmark mode" hitting Space again.
+Use Ctrl+Left Button to place a landmark.
+Info
+At least 4 landmarks are needed before activating the live-transform view.
+When there are at least 4 landmarks, hit T to activate the "Transformed" view. Transformed
will be written at the bottom.
OK
.Important remarks and tips
+Head to Register > Edit last Registration
to work on a previous registration.
If the previous registration was done with elastix (Spline) or BigWarp, it will launch the BigWarp interface exactly like above, but with landmarks already placed, either on a grid (elastix) or the one you manually placed (BigWarp).
+Tip
+It will ask which channels to use, you can modify the channel for your slices to work on two channels successively. For instance, one could make a first registration using the NISSL staining, then refine the motoneurons with the ChAT staining, if available.
+ABBA can save the state you're in, from the File > Save State
menu. It will be saved as a .abba
file, which is actually a zip archive containing a bunch of JSON, listing every actions you made and in which order, meaning you will stil be able to cancel actions after quitting ABBA.
To load a state, quit ABBA, launch it again, then choose File > Load State
and select the .abba
file to carry on with the registration.
Save, save, save !
+Those state files are cheap, eg. they are lightweight (less than 200KB). You should save the state each time you finish a slice, and you can keep all your files, without overwritting the previous ones, appending a number to its file name. This will allow to roll back to the previous slice in the event of any problem you might face.
+Once you are satisfied with your registration, select the registered slices and head to Export > QuPath > Export Registrations To QuPath Project
. Check the box to make sure to get the latest registered regions.
It will export several files in the QuPath projects, including the transformed atlas regions ready to be imported in QuPath and the transformations parameters to be able to convert coordinates from the extension.
+Make sure you installed the ABBA extension in QuPath.
+From your project with an image open, the basic usage is to head to Extensions > ABBA > Load Atlas Annotations into Open Image
.
+Choose to Split Left and Right Regions
to make the two hemispheres independent, and choose the "acronym" to name the regions. The registered regions should be imported as Annotations in the image.
Tip
+With ABBA in regular Fiji using the CCFv3 Allen mouse brain atlas, the left and right regions are flipped, because ABBA considers the slices as backward facing. The importAbba.groovy
script located in scripts/qupath-utils-atlas
allows you to flip left/right regions names. This is OK because the Allen brain is symmetrical by construction.
For more complex use, check the Groovy scripts in scripts/qupath-utils/atlas
. ABBA registration is used throughout the guides, to either work with brain regions (and count objects for instance) or to get the detections' coordinates in the atlas space.
Info
+The documentation is under construction.
+cuisto
is a Python package aiming at quantifying histological data.
After ABBA registration of 2D histological slices and QuPath objects' detection, cuisto
is used to :
This documentation contains cuisto
installation instructions, ABBA installation instructions, guides to prepare images for the pipeline, detect objects with QuPath, register 2D slices on a 3D atlas with ABBA, along with examples.
In theory, cuisto
should work with any measurements table with the required columns, but has been designed with ABBA and QuPath in mind.
Due to the IT environment of the laboratory, this documentation is very Windows-oriented but most of it should be applicable to Linux and MacOS as well by slightly adapting terminal commands.
+ +The documentation outline is on the left panel, you can click on items to browse it. In each page, you'll get the table of contents on the right panel.
+cuisto
has been primarly developed by Guillaume Le Goc in Julien Bouvier's lab at NeuroPSI. The clever name was found by Aurélie Bodeau.
The documentation itself is built with MkDocs using the Material theme.
+ + + + + + + + + + + + + + + + + + + + + + + +While cuisto
does not have a reference paper as of now, you can reference the GitHub repository.
Please make sure to cite all the softwares used in your research. Citations are usually the only metric used by funding agencies, so citing properly the tools used in your research ensures the continuation of those projects.
+There are three configuration files : altas_blacklist
, atlas_fusion
and a modality-specific file, that we'll call config
in this document. The former two are related to the atlas you're using, the latter is what is used by cuisto
to know what and how to compute and display things. There is a fourth, optional, file, used to provide some information on a specific experiment, info
.
The configuration files are in the TOML file format, that are basically text files formatted in a way that is easy to parse in Python. See here for a basic explanation of the syntax.
+Most lines of each template file are commented to explain what each parameter do.
+# TOML file to list Allen brain regions to ignore during analysis.
+#
+# It is used to blacklist regions and all descendants regions ("WITH_CHILD").
+# Objects belonging to those regions and their descendants will be discarded.
+# And you can specify an exact region where to remove objects ("EXACT"),
+# descendants won't be affected.
+# Use it to remove noise in CBX, ventricular systems and fiber tracts.
+# Regions are referenced by their exact acronym.
+#
+# Syntax :
+# [WITH_CHILDS]
+# members = ["CBX", "fiber tracts", "VS"]
+#
+# [EXACT]
+# members = ["CB"]
+
+
+[WITH_CHILDS]
+members = ["CBX", "fiber tracts", "VS"]
+
+[EXACT]
+members = ["CB"]
+
This file is used to filter out specified regions and objects belonging to them.
+Regions whose acronym is listed in the members keys will be ignored; objects whose parents are in there will be ignored as well.
In the [WITH_CHILDS] section, regions and objects belonging to those regions and all descending regions (child regions, as per the atlas hierarchy) will be removed.
In the [EXACT] section, only regions and objects belonging to those exact regions are removed. Descendant regions are not taken into account.
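As an illustration, here is how such a file can be parsed and applied to a table of objects (a sketch : the toy hierarchy below stands in for the real one, which cuisto resolves through brainglobe-atlasapi ; tomllib requires Python 3.11+) :
import tomllib

with open("atlas_blacklist.toml", "rb") as fid:
    blacklist = tomllib.load(fid)

# Toy hierarchy standing in for the brainglobe atlas tree.
children = {"CBX": ["VERM", "HEM"], "fiber tracts": ["cst"], "VS": ["VL"]}

bad_regions = set(blacklist["EXACT"]["members"])  # exact acronyms only
for acronym in blacklist["WITH_CHILDS"]["members"]:
    bad_regions.add(acronym)
    bad_regions.update(children.get(acronym, []))  # and all descendants

# df is a detections or annotations table ; drop blacklisted entries, eg. :
# df = df[~df["acronym"].isin(bad_regions)]
print(sorted(bad_regions))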
+# Regions are referenced by their exact acronym.
+# The syntax should be the following :
+#
+# [MY]
+# name = "Medulla" # new or existing full name
+# acronym = "MY" # new or existing acronym
+# members = ["MY-mot", "MY-sat"] # existing Allen Brain acronyms that should belong to the new region
+#
+# Then, regions labelled "MY-mot" and "MY-sat" will be labelled "MY" and will join regions already labelled "MY".
+# What's in [] does not matter but must be unique and is used to group.
+# The new "name" and "acronym" can be existing Allen Brain regions or a new (meaningful) one.
+# Note that it is case sensitive.
+
+[PHY]
+name = "Perihypoglossal nuclei"
+acronym = "PHY"
+members = ["NR", "PRP"]
+
+[NTS]
+name = "Nucleus of the solitary tract"
+acronym = "NTS"
+members = ["ts", "NTSce", "NTSco", "NTSge", "NTSl", "NTSm"]
+
+[AMB]
+name = "Nucleus ambiguus"
+acronym = "AMB"
+members = ["AMBd", "AMBv"]
+
+[MY]
+name = "Medulla undetermined"
+acronym = "MYu"
+members = ["MY-mot", "MY-sat"]
+
+[IRN]
+name = "Intermediate reticular nucleus"
+acronym = "IRN"
+members = ["IRN", "LIN"]
+
This file is used to group regions together, to customize the atlas' hierarchy. It is particularly useful to group small brain regions that are impossible to register precisely.
+Keys name
, acronym
and members
should belong to a [section]
.
[section] is just for organizing; the name does not matter, but it should be unique.
name should be a human-readable name for your new region.
acronym is how the region will be referred to. It can be a new acronym, or an existing one.
members is a list of acronyms of atlas regions that should be part of the new one.
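Programmatically, applying such a fusion simply amounts to remapping acronyms, as sketched below (tomllib requires Python 3.11+) :
import tomllib
import pandas as pd

with open("atlas_fusion.toml", "rb") as fid:
    fusion = tomllib.load(fid)

# Build a member acronym -> new acronym mapping from every [section].
mapping = {
    member: section["acronym"]
    for section in fusion.values()
    for member in section["members"]
}

df = pd.DataFrame({"acronym": ["MY-mot", "NR", "PAG"]})  # toy data
df["acronym"] = df["acronym"].replace(mapping)
print(df["acronym"].tolist())  # ['MYu', 'PHY', 'PAG']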
+# Configuration file for cuisto package
+# -----------------------------------------
+# This is a TOML file. It maps a key to a value : `key = value`.
+# Each key must exist and be filled. The keys' names can't be modified, except:
+# - entries in the [channels.names] section and its corresponding [channels.colors] section,
+# - entries in the [regions.metrics] section.
+#
+# It is strongly advised to NOT modify this template but rather copy it and modify the copy.
+# Useful resources :
+# - the TOML specification : https://toml.io/en/
+# - matplotlib colors : https://matplotlib.org/stable/gallery/color/color_demo.html
+#
+# Configuration file part of the python cuisto package.
+# version : 2.1
+########################################################################################
+
+object_type = "Cells" # name of QuPath base classification (eg. without the ": subclass" part)
+segmentation_tag = "cells" # type of segmentation, matches directory name, used only in the full pipeline
+
+[atlas] # information related to the atlas used
+name = "allen_mouse_10um" # brainglobe-atlasapi atlas name
+type = "brain" # brain or cord (eg. registration done in ABBA or abba_python)
+midline = 5700 # midline Z coordinates (left/right limit) in microns
+outline_structures = ["root", "CB", "MY", "P"] # structures to show an outline of in heatmaps
+
+[channels] # information related to imaging channels
+[channels.names] # must contain all classifications derived from "object_type"
+"marker+" = "Positive" # classification name = name to display
+"marker-" = "Negative"
+[channels.colors] # must have same keys as names' keys
+"marker+" = "#96c896" # classification name = matplotlib color (either #hex, color name or RGB list)
+"marker-" = "#688ba6"
+
+[hemispheres] # information related to hemispheres
+[hemispheres.names]
+Left = "Left" # Left = name to display
+Right = "Right" # Right = name to display
+[hemispheres.colors] # must have same keys as names' keys
+Left = "#ff516e" # Left = matplotlib color (either #hex, color name or RGB list)
+Right = "#960010" # Right = matplotlib color
+
+[distributions] # spatial distributions parameters
+stereo = true # use stereotaxic coordinates (Paxinos, only for brain)
+ap_lim = [-8.0, 0.0] # bins limits for anterio-posterior
+ap_nbins = 75 # number of bins for anterio-posterior
+dv_lim = [-1.0, 7.0] # bins limits for dorso-ventral
+dv_nbins = 50 # number of bins for dorso-ventral
+ml_lim = [-5.0, 5.0] # bins limits for medio-lateral
+ml_nbins = 50 # number of bins for medio-lateral
+hue = "channel" # color curves with this parameter, must be "hemisphere" or "channel"
+hue_filter = "Left" # use only a subset of data. If hue=hemisphere : channel name, list of such or "all". If hue=channel : hemisphere name or "both".
+common_norm = true # use a global normalization for each hue (eg. the sum of areas under all curves is 1)
+[distributions.display]
+show_injection = false # add a patch showing the extent of injection sites. Uses corresponding channel colors
+cmap = "OrRd" # matplotlib color map for heatmaps
+cmap_nbins = 50 # number of bins for heatmaps
+cmap_lim = [1, 50] # color limits for heatmaps
+
+[regions] # distributions per regions parameters
+base_measurement = "Count" # the name of the measurement in QuPath to derive others from
+hue = "channel" # color bars with this parameter, must be "hemisphere" or "channel"
+hue_filter = "Left" # use only a subset of data. If hue=hemisphere : channel name, list of such or "all". If hue=channel : hemisphere name or "both".
+hue_mirror = false # plot two hue_filter in mirror instead of discarding the other
+normalize_starter_cells = false # normalize non-relative metrics by the number of starter cells
+[regions.metrics] # names of metrics. Do not change the keys !
+"density µm^-2" = "density µm^-2"
+"density mm^-2" = "density mm^-2"
+"coverage index" = "coverage index"
+"relative measurement" = "relative count"
+"relative density" = "relative density"
+[regions.display]
+nregions = 18 # number of regions to display (sorted by max.)
+orientation = "h" # orientation of the bars ("h" or "v")
+order = "max" # order the regions by "ontology" or by "max". Set to "max" to provide a custom order
+dodge = true # enforce the bar not being stacked
+log_scale = false # use log. scale for metrics
+[regions.display.metrics] # name of metrics to display
+"count" = "count" # real_name = display_name, with real_name the "values" in [regions.metrics]
+"density mm^-2" = "density (mm^-2)"
+
+[files] # full path to information TOML files
+blacklist = "../../atlas/atlas_blacklist.toml"
+fusion = "../../atlas/atlas_fusion.toml"
+outlines = "/data/atlases/allen_mouse_10um_outlines.h5"
+infos = "../../configs/infos_template.toml"
+
This file is used to configure cuisto
behavior. It specifies what to compute, how, and display parameters such as colors associated to each classifications, hemisphere names, distributions bins limits...
Warning
+When editing your config.toml file, you're allowed to modify the keys only in the [channels]
section.
object_type
: name of QuPath base classification (eg. without the ": subclass" part)
+segmentation_tag
: type of segmentation, matches directory name, used only in the full pipeline
name
: brainglobe-atlasapi atlas name
+type
: "brain" or "cord" (eg. registration done in ABBA or abba_python). This will determine whether to flip Left/Right when determining detections hemisphere based on their coordinates. Also adapts the axes in the 2D heatmaps.
+midline
: midline Z coordinates (left/right limit) in microns to determine detections hemisphere based on their coordinates.
+outline_structures
: structures to show an outline of in heatmaps
subclassification name = name to display on the plots
+"marker+"
: classification name = name to display
+"marker-"
: add any number of sub-classification
subclassification name = color
, with color specified as a matplotlib named color, an RGB list or an hex code.
+"marker+"
: classification name = matplotlib color
+"marker-"
: must have the same entries as "names".
Left
: Left = name to display
+Right
: Right = name to display
Left
: ff516e" # Left = matplotlib color (either #hex, color name or RGB list)
+Right
: 960010" # Right = matplotlib color
stereo
: use stereotaxic coordinates (as in Paxinos, only for mouse brain CCFv3)
+ap_lim
: bins limits for anterio-posterior in mm
+ap_nbins
: number of bins for anterio-posterior
+dv_lim
: bins limits for dorso-ventral in mm
+dv_nbins
: number of bins for dorso-ventral
+ml_lim
: bins limits for medio-lateral in mm
+ml_nbins
: number of bins for medio-lateral
+hue
: color curves with this parameter, must be "hemisphere" or "channel"
+hue_filter
: use only a subset of data
common_norm
: use a global normalization (eg. the sum of areas under all curves is 1). Otherwise, normalize each hue individually
show_injection
: add a patch showing the extent of injection sites. Uses corresponding channel colors. Requires the information TOML configuration file set up
+cmap
: matplotlib color map for 2D heatmaps
+cmap_nbins
: number of bins for 2D heatmaps
+cmap_lim
: color limits for 2D heatmaps
base_measurement
: the name of the measurement in QuPath to derive others from. Usually "Count" or "Length µm"
+hue
: color bars with this parameter, must be "hemisphere" or "channel"
+hue_filter
: use only a subset of data
hue_mirror
: plot two hue_filter in mirror instead of discarding the others. For example, if hue=channel and hue_filter="both", plots the two hemisphere in mirror.
+normalize_starter_cells
: normalize non-relative metrics by the number of starter cells
"density µm^-2"
: relevant name
+"density mm^-2"
: relevant name
+"coverage index"
: relevant name
+"relative measurement"
: relevant name
+"relative density"
: relevant name
nregions
: number of regions to display (sorted by max.)
+orientation
: orientation of the bars ("h" or "v")
+order
: order the regions by "ontology" or by "max". Set to "max" to provide a custom order
+dodge
: enforce the bar not being stacked
+log_scale
: use log. scale for metrics
"count"
: real_name = display_name, with real_name the "values" in [regions.metrics]
+"density mm^-2"
blacklist
+fusion
+outlines
+infos
# TOML file to specify experimental settings of each animals.
+# Syntax should be :
+# [animalid0] # animal ID
+# slice_thickness = 30 # slice thickness in microns
+# slice_spacing = 60 # spacing between two slices in microns
+# [animalid0.marker-name] # [{Animal id}.{segmented channel name}]
+# starter_cells = 190 # number of starter cells
+# injection_site = [x, y, z] # approx. injection site in CCFv3 coordinates
+#
+# --------------------------------------------------------------------------
+[animalid0]
+slice_thickness = 30
+slice_spacing = 60
+[animalid0."marker+"]
+starter_cells = 150
+injection_site = [ 10.8937328, 6.18522070, 6.841855301 ]
+[animalid0."marker-"]
+starter_cells = 175
+injection_site = [ 10.7498512, 6.21545461, 6.815487203 ]
+# --------------------------------------------------------------------------
+[animalid1-SC]
+slice_thickness = 30
+slice_spacing = 120
+[animalid1-SC.EGFP]
+starter_cells = 250
+injection_site = [ 10.9468211, 6.3479642, 6.0061113 ]
+[animalid1-SC.DsRed]
+starter_cells = 275
+injection_site = [ 10.9154874, 6.2954872, 8.1587125 ]
+# --------------------------------------------------------------------------
+
This file is used to specify injection sites for each animal and each channel, to display it in distributions.
+ + + + + + + + + + + + + + + + + + + + + + + +For help in QuPath, ABBA, Fiji or any image processing-related questions, your one stop is the image.sc forum. There, you can search with specific tags (#qupath
, #abba
, ...). You can also ask questions or even answer to some by creating an account !
For help with cuisto
in particular, you can open an issue in Github (which requires an account as well), or send an email to me or Antoine Lesage.
cuisto-xxx
folder :
+
+If you want to build the doc :
+Tip
+If all goes well, you shouldn't need any admin rights to install the various pieces of software used before cuisto
.
Important
+Remember to cite all softwares you use ! See Citing.
+QuPath is an "open source software for bioimage analysis". You can install it from the official website : https://qupath.github.io/.
+The documentation is quite clear and comprehensive : https://qupath.readthedocs.io/en/stable/index.html.
This is where you'll create QuPath projects, in which you'll be able to browse your images, annotate them, import registered brain regions and find objects of interests (via automatic segmentation, thresholding, pixel classification, ...). Then, those annotations and detections can be exported to be processed by cuisto
.
This is the tool you'll use to register 2D histological sections to 3D atlases. See the dedicated page.
+conda
)#The cuisto
package is written in Python. It depends on scientific libraries (such as NumPy, pandas and many more). Those libraries need to be installed in versions that are compatible with each other and with cuisto
. To make sure those versions do not conflict with other Python tools you might be using (deeplabcut
, abba_python
, ...), we will install cuisto
and its dependencies in a dedicated virtual environment.
conda
is a software that takes care of this. It comes with a "base" environment, from which we will create and manage other, project-specific environments. It is also used to download and install python in each of those environments, as well as third-party libraries. conda
in itself is free and open-source and can be used freely by anyone.
It is included with the Anaconda distribution, which is subject to specific terms of service, which state that unless you're an individual, a member of a company with less than 200 employees or a member of an university (but not a national research lab) it's free to use, otherwise, you need to pay a licence. conda
, while being free, is by default configured to use the "defaults" channel to fetch the packages (including Python itself), a repository operated by Anaconda, which is, itself, subject to the Anaconda terms of service.
In contrast, conda-forge is a community-run repository that contains more numerous and more update-to-date packages. This is free to use for anyone. The idea is to use conda
directly (instead of Anaconda graphical interface) and download packages from conda-forge (instead of the Anaconda-run defaults). To try to decipher this mess, Anaconda provides this figure :
Furthermore, the "base" conda environment installed with the Anaconda distribution is bloated and already contains tons of libraries, and tends to self-destruct at some point (eg. becomes unable to resolve the inter-dependencies), which makes you unable to install new libraries nor create new environments.
+This is why it is highly recommended to install Miniconda instead, a minimal installer for conda, and configure it to use the free, community-run channel conda-forge, or, even better, use Miniforge which is basically the same but pre-configured to use conda-forge. The only downside is that will not get the Anaonda graphical user interface and you'll need to use the terminal instead, but worry not ! We got you covered.
+Tip
+If Anaconda is already installed and you don't have the rights to uninstall it, you'll have to use it instead. You can launch the "Anaconda Prompt (PowerShell)", run conda init
. Open a regular PowerShell window and run conda config --add channels conda-forge
, so that subsequent installations and environments creation will fetch required dependencies from conda-forge.
This section explains how to actually install the cuisto
package.
+The following commands should be run from a terminal (PowerShell). Remember that the -c conda-forge
bits are not necessary if you installed conda with the miniforge distribution.
cuisto
Source code .zip package, from the Releases page.cuisto-env
environment we just created. First, you need to activate the cuisto-env
environment :
+
+Now, the prompt should look like this :
+
+This means that Python packages will now be installed in the cuisto-env
environment and won't conflict with other toolboxes you might be using.
+Then, we use pip
to install cuisto
. pip
was installed with Python, and will scan the cuisto
folder, specifically the "pyproject.toml" file that lists all the required dependencies. To do so, you can either :cuisto
folder, use Shift+Right Button to "Open PowerShell window here" and run :
+cuisto
is now installed inside the cuisto-env
environment and will be available in Python from that environment !
Tip
+You will need to perform step 3. each time you want to update the package.
+If you already have registered data and cells in QuPath, you can export Annotations and Detections as TSV files and head to the Example section.
+ + + + + + + + + + + + + + + + + + + + + + + +A Jupyter notebook is a way to use Python in an interactive manner. It uses cells that contain Python code, and that are to be executed to immediately see the output, including figures.
+You can see some rendered notebooks in the examples here, but you can also download them (downward arrow button on the top right corner of each notebook) and run them locally with your own data.
+To do so, you can either use an integrated development environment (basically a supercharged text editor) that supports Jupyter notebooks, or directly the Jupyter web interface.
+You can use for instance Visual Studio Code, also known as vscode.
+cd Documents\notebooks
or, in the file explorer in your "notebooks" folder, Shift+Right Button to "Open PowerShell window here")Info
The documentation is under construction.
cuisto
is a Python package aiming at quantifying histological data.
After ABBA registration of 2D histological slices and QuPath objects' detection, cuisto
is used to :
This documentation contains cuisto
installation instructions, ABBA installation instructions, guides to prepare images for the pipeline, detect objects with QuPath, register 2D slices on a 3D atlas with ABBA, along with examples.
In theory, cuisto
should work with any measurements table with the required columns, but has been designed with ABBA and QuPath in mind.
Due to the IT environment of the laboratory, this documentation is very Windows-oriented but most of it should be applicable to Linux and MacOS as well by slightly adapting terminal commands.
"},{"location":"index.html#documentation-navigation","title":"Documentation navigation","text":"The documentation outline is on the left panel, you can click on items to browse it. In each page, you'll get the table of contents on the right panel.
"},{"location":"index.html#useful-external-resources","title":"Useful external resources","text":"cuisto
has been primarly developed by Guillaume Le Goc in Julien Bouvier's lab at NeuroPSI. The clever name was found by Aur\u00e9lie Bodeau.
The documentation itself is built with MkDocs using the Material theme.
"},{"location":"api-compute.html","title":"cuisto.compute","text":"compute module, part of cuisto.
Contains actual computation functions.
"},{"location":"api-compute.html#cuisto.compute.get_distribution","title":"get_distribution(df, col, hue, hue_filter, per_commonnorm, binlim, nbins=100)
","text":"Computes distribution of objects.
A global distribution using only col
is computed, then it computes a distribution distinguishing values in the hue
column. For the latter, it is possible to use a subset of the data ony, based on another column using hue_filter
. This another column is determined with hue
, if the latter is \"hemisphere\", then hue_filter
is used in the \"channel\" color and vice-versa. per_commonnorm
controls how they are normalized, either as a whole (True) or independantly (False).
Use cases : (1) single-channel, two hemispheres : col=x
, hue=hemisphere
, hue_filter=\"\"
, per_commonorm=True
. Computes a distribution for each hemisphere, the sum of the area of both is equal to 1. (2) three-channels, one hemisphere : col=x
, hue=channel
, hue_filter=\"Ipsi.\", per_commonnorm=False
. Computes a distribution for each channel only for points in the ipsilateral hemisphere. Each curve will have an area of 1.
Parameters:
Name Type Description Defaultdf
DataFrame
required col
str
Key in df
, used to compute the distributions.
hue
str
Key in df
. Criterion for additional distributions.
hue_filter
str
Further filtering for \"per\" distribution. - hue = channel : value is the name of one of the hemisphere - hue = hemisphere : value can be the name of a channel, a list of such or \"all\"
requiredper_commonnorm
bool
Use common normalization for all hues (per argument).
requiredbinlim
list or tuple
First bin left edge and last bin right edge.
requirednbins
int
Number of bins. Default is 100.
100
Returns:
Name Type Descriptiondf_distribution
DataFrame
DataFrame with bins
, distribution
, count
and their per-hemisphere or per-channel variants.
cuisto/compute.py
def get_distribution(\n df: pd.DataFrame,\n col: str,\n hue: str,\n hue_filter: dict,\n per_commonnorm: bool,\n binlim: tuple | list,\n nbins=100,\n) -> pd.DataFrame:\n \"\"\"\n Computes distribution of objects.\n\n A global distribution using only `col` is computed, then it computes a distribution\n distinguishing values in the `hue` column. For the latter, it is possible to use a\n subset of the data ony, based on another column using `hue_filter`. This another\n column is determined with `hue`, if the latter is \"hemisphere\", then `hue_filter` is\n used in the \"channel\" color and vice-versa.\n `per_commonnorm` controls how they are normalized, either as a whole (True) or\n independantly (False).\n\n Use cases :\n (1) single-channel, two hemispheres : `col=x`, `hue=hemisphere`, `hue_filter=\"\"`,\n `per_commonorm=True`. Computes a distribution for each hemisphere, the sum of the\n area of both is equal to 1.\n (2) three-channels, one hemisphere : `col=x`, hue=`channel`,\n `hue_filter=\"Ipsi.\", per_commonnorm=False`. Computes a distribution for each channel\n only for points in the ipsilateral hemisphere. Each curve will have an area of 1.\n\n Parameters\n ----------\n df : pandas.DataFrame\n col : str\n Key in `df`, used to compute the distributions.\n hue : str\n Key in `df`. Criterion for additional distributions.\n hue_filter : str\n Further filtering for \"per\" distribution.\n - hue = channel : value is the name of one of the hemisphere\n - hue = hemisphere : value can be the name of a channel, a list of such or \"all\"\n per_commonnorm : bool\n Use common normalization for all hues (per argument).\n binlim : list or tuple\n First bin left edge and last bin right edge.\n nbins : int, optional\n Number of bins. Default is 100.\n\n Returns\n -------\n df_distribution : pandas.DataFrame\n DataFrame with `bins`, `distribution`, `count` and their per-hemisphere or\n per-channel variants.\n\n \"\"\"\n\n # - Preparation\n bin_edges = np.linspace(*binlim, nbins + 1) # create bins\n df_distribution = [] # prepare list of distributions\n\n # - Both hemispheres, all channels\n # get raw count per bins (histogram)\n count, bin_edges = np.histogram(df[col], bin_edges)\n # get normalized count (pdf)\n distribution, _ = np.histogram(df[col], bin_edges, density=True)\n # get bin centers rather than edges to plot them\n bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2\n\n # make a DataFrame out of that\n df_distribution.append(\n pd.DataFrame(\n {\n \"bins\": bin_centers,\n \"distribution\": distribution,\n \"count\": count,\n \"hemisphere\": \"both\",\n \"channel\": \"all\",\n \"axis\": col, # keep track of what col. 
was used\n }\n )\n )\n\n # - Per additional criterion\n # select data\n df_sub = select_hemisphere_channel(df, hue, hue_filter, False)\n hue_values = df[hue].unique() # get grouping values\n # total number of datapoints in the subset used for additional distribution\n length_total = len(df_sub)\n\n for value in hue_values:\n # select part and coordinates\n df_part = df_sub.loc[df_sub[hue] == value, col]\n\n # get raw count per bins (histogram)\n count, bin_edges = np.histogram(df_part, bin_edges)\n # get normalized count (pdf)\n distribution, _ = np.histogram(df_part, bin_edges, density=True)\n\n if per_commonnorm:\n # re-normalize so that the sum of areas of all sub-parts is 1\n length_part = len(df_part) # number of datapoints in that hemisphere\n distribution *= length_part / length_total\n\n # get bin centers rather than edges to plot them\n bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2\n\n # make a DataFrame out of that\n df_distribution.append(\n pd.DataFrame(\n {\n \"bins\": bin_centers,\n \"distribution\": distribution,\n \"count\": count,\n hue: value,\n \"channel\" if hue == \"hemisphere\" else \"hemisphere\": hue_filter,\n \"axis\": col, # keep track of what col. was used\n }\n )\n )\n\n return pd.concat(df_distribution)\n
"},{"location":"api-compute.html#cuisto.compute.get_regions_metrics","title":"get_regions_metrics(df_annotations, object_type, channel_names, meas_base_name, metrics_names)
","text":"Get a new DataFrame with cumulated axons segments length in each brain regions.
This is the quantification per brain regions for fibers-like objects, eg. axons. The returned DataFrame has columns \"cum. length \u00b5m\", \"cum. length mm\", \"density \u00b5m^-1\", \"density mm^-1\", \"coverage index\".
Parameters:
Name Type Description Defaultdf_annotations
DataFrame
DataFrame with an entry for each brain regions, with columns \"Area \u00b5m^2\", \"Name\", \"hemisphere\", and \"{object_type: channel} Length \u00b5m\".
requiredobject_type
str
Object type (primary classification).
requiredchannel_names
dict
Map between original channel names to something else.
requiredmeas_base_name
str
required metrics_names
dict
required Returns:
Name Type Descriptiondf_regions
DataFrame
DataFrame with brain regions name, area and metrics.
Source code incuisto/compute.py
def get_regions_metrics(\n df_annotations: pd.DataFrame,\n object_type: str,\n channel_names: dict,\n meas_base_name: str,\n metrics_names: dict,\n) -> pd.DataFrame:\n \"\"\"\n Get a new DataFrame with cumulated axons segments length in each brain regions.\n\n This is the quantification per brain regions for fibers-like objects, eg. axons. The\n returned DataFrame has columns \"cum. length \u00b5m\", \"cum. length mm\", \"density \u00b5m^-1\",\n \"density mm^-1\", \"coverage index\".\n\n Parameters\n ----------\n df_annotations : pandas.DataFrame\n DataFrame with an entry for each brain regions, with columns \"Area \u00b5m^2\",\n \"Name\", \"hemisphere\", and \"{object_type: channel} Length \u00b5m\".\n object_type : str\n Object type (primary classification).\n channel_names : dict\n Map between original channel names to something else.\n meas_base_name : str\n metrics_names : dict\n\n Returns\n -------\n df_regions : pandas.DataFrame\n DataFrame with brain regions name, area and metrics.\n\n \"\"\"\n # get columns names\n cols = df_annotations.columns\n # get columns with fibers lengths\n cols_colors = cols[\n cols.str.startswith(object_type) & cols.str.endswith(meas_base_name)\n ]\n # select relevant data\n cols_to_select = pd.Index([\"Name\", \"hemisphere\", \"Area \u00b5m^2\"]).append(cols_colors)\n # sum lengths and areas of each brain regions\n df_regions = (\n df_annotations[cols_to_select]\n .groupby([\"Name\", \"hemisphere\"])\n .sum()\n .reset_index()\n )\n\n # get measurement for both hemispheres (sum)\n df_both = df_annotations[cols_to_select].groupby([\"Name\"]).sum().reset_index()\n df_both[\"hemisphere\"] = \"both\"\n df_regions = (\n pd.concat([df_regions, df_both], ignore_index=True)\n .sort_values(by=\"Name\")\n .reset_index()\n .drop(columns=\"index\")\n )\n\n # rename measurement columns to lower case\n df_regions = df_regions.rename(\n columns={\n k: k.replace(meas_base_name, meas_base_name.lower()) for k in cols_colors\n }\n )\n\n # update names\n meas_base_name = meas_base_name.lower()\n cols = df_regions.columns\n cols_colors = cols[\n cols.str.startswith(object_type) & cols.str.endswith(meas_base_name)\n ]\n\n # convert area in mm^2\n df_regions[\"Area mm^2\"] = df_regions[\"Area \u00b5m^2\"] / 1e6\n\n # prepare metrics\n if \"\u00b5m\" in meas_base_name:\n # fibers : convert to mm\n cols_to_convert = pd.Index([col for col in cols_colors if \"\u00b5m\" in col])\n df_regions[cols_to_convert.str.replace(\"\u00b5m\", \"mm\")] = (\n df_regions[cols_to_convert] / 1000\n )\n metrics = [meas_base_name, meas_base_name.replace(\"\u00b5m\", \"mm\")]\n else:\n # objects : count\n metrics = [meas_base_name]\n\n # density = measurement / area\n metric = metrics_names[\"density \u00b5m^-2\"]\n df_regions[cols_colors.str.replace(meas_base_name, metric)] = df_regions[\n cols_colors\n ].divide(df_regions[\"Area \u00b5m^2\"], axis=0)\n metrics.append(metric)\n metric = metrics_names[\"density mm^-2\"]\n df_regions[cols_colors.str.replace(meas_base_name, metric)] = df_regions[\n cols_colors\n ].divide(df_regions[\"Area mm^2\"], axis=0)\n metrics.append(metric)\n\n # coverage index = measurement\u00b2 / area\n metric = metrics_names[\"coverage index\"]\n df_regions[cols_colors.str.replace(meas_base_name, metric)] = (\n df_regions[cols_colors].pow(2).divide(df_regions[\"Area \u00b5m^2\"], axis=0)\n )\n metrics.append(metric)\n\n # prepare relative metrics columns\n metric = metrics_names[\"relative measurement\"]\n cols_rel_meas = cols_colors.str.replace(meas_base_name, metric)\n 
df_regions[cols_rel_meas] = np.nan\n metrics.append(metric)\n metric = metrics_names[\"relative density\"]\n cols_dens = cols_colors.str.replace(meas_base_name, metrics_names[\"density mm^-2\"])\n cols_rel_dens = cols_colors.str.replace(meas_base_name, metric)\n df_regions[cols_rel_dens] = np.nan\n metrics.append(metric)\n # relative metrics should be defined within each hemispheres (left, right, both)\n for hemisphere in df_regions[\"hemisphere\"].unique():\n row_indexer = df_regions[\"hemisphere\"] == hemisphere\n\n # relative measurement = measurement / total measurement\n df_regions.loc[row_indexer, cols_rel_meas] = (\n df_regions.loc[row_indexer, cols_colors]\n .divide(df_regions.loc[row_indexer, cols_colors].sum())\n .to_numpy()\n )\n\n # relative density = density / total density\n df_regions.loc[row_indexer, cols_rel_dens] = (\n df_regions.loc[\n row_indexer,\n cols_dens,\n ]\n .divide(df_regions.loc[row_indexer, cols_dens].sum())\n .to_numpy()\n )\n\n # collect channel names\n channels = (\n cols_colors.str.replace(object_type + \": \", \"\")\n .str.replace(\" \" + meas_base_name, \"\")\n .values.tolist()\n )\n # collect measurements columns names\n cols_metrics = df_regions.columns.difference(\n pd.Index([\"Name\", \"hemisphere\", \"Area \u00b5m^2\", \"Area mm^2\"])\n )\n for metric in metrics:\n cols_to_cat = [f\"{object_type}: {cn} {metric}\" for cn in channels]\n # make sure it's part of available metrics\n if not set(cols_to_cat) <= set(cols_metrics):\n raise ValueError(f\"{cols_to_cat} not in DataFrame.\")\n # group all colors in the same colors\n df_regions[metric] = df_regions[cols_to_cat].values.tolist()\n # remove original data\n df_regions = df_regions.drop(columns=cols_to_cat)\n\n # add a color tag, given their names in the configuration file\n df_regions[\"channel\"] = len(df_regions) * [[channel_names[k] for k in channels]]\n metrics.append(\"channel\")\n\n # explode the dataframe so that each color has an entry\n df_regions = df_regions.explode(metrics)\n\n return df_regions\n
"},{"location":"api-compute.html#cuisto.compute.normalize_starter_cells","title":"normalize_starter_cells(df, cols, animal, info_file, channel_names)
","text":"Normalize data by the number of starter cells.
Parameters:
Name Type Description Defaultdf
DataFrame
Contains the data to be normalized.
requiredcols
list - like
Columns to divide by the number of starter cells.
requiredanimal
str
Animal ID to parse the number of starter cells.
requiredinfo_file
str
Full path to the TOML file with informations.
requiredchannel_names
dict
Map between original channel names to something else.
requiredReturns:
Type DescriptionDataFrame
Same df
with normalized count.
cuisto/compute.py
def normalize_starter_cells(\n df: pd.DataFrame, cols: list[str], animal: str, info_file: str, channel_names: dict\n) -> pd.DataFrame:\n \"\"\"\n Normalize data by the number of starter cells.\n\n Parameters\n ----------\n df : pd.DataFrame\n Contains the data to be normalized.\n cols : list-like\n Columns to divide by the number of starter cells.\n animal : str\n Animal ID to parse the number of starter cells.\n info_file : str\n Full path to the TOML file with informations.\n channel_names : dict\n Map between original channel names to something else.\n\n Returns\n -------\n pd.DataFrame\n Same `df` with normalized count.\n\n \"\"\"\n for channel in df[\"channel\"].unique():\n # inverse mapping channel colors : names\n reverse_channels = {v: k for k, v in channel_names.items()}\n nstarters = get_starter_cells(animal, reverse_channels[channel], info_file)\n\n for col in cols:\n df.loc[df[\"channel\"] == channel, col] = (\n df.loc[df[\"channel\"] == channel, col] / nstarters\n )\n\n return df\n
"},{"location":"api-config-config.html","title":"Api config config","text":"object_type
: name of QuPath base classification (eg. without the \": subclass\" part) segmentation_tag
: type of segmentation, matches directory name, used only in the full pipeline
Information related to the atlas used
name
: brainglobe-atlasapi atlas name type
: \"brain\" or \"cord\" (eg. registration done in ABBA or abba_python). This will determine whether to flip Left/Right when determining detections hemisphere based on their coordinates. Also adapts the axes in the 2D heatmaps. midline
: midline Z coordinates (left/right limit) in microns to determine detections hemisphere based on their coordinates. outline_structures
: structures to show an outline of in heatmaps
Information related to imaging channels
namesMust contain all classifications derived from \"object_type\" you want to process. In the form subclassification name = name to display on the plots
\"marker+\"
: classification name = name to display \"marker-\"
: add any number of sub-classification
Must have same keys as \"names\" keys, in the form subclassification name = color
, with color specified as a matplotlib named color, an RGB list or an hex code.
\"marker+\"
: classification name = matplotlib color \"marker-\"
: must have the same entries as \"names\".
Information related to hemispheres, same structure as channels
namesLeft
: Left = name to display Right
: Right = name to display
Must have same keys as names' keys
Left
: ff516e\" # Left = matplotlib color (either #hex, color name or RGB list) Right
: 960010\" # Right = matplotlib color
Spatial distributions parameters
stereo
: use stereotaxic coordinates (as in Paxinos, only for mouse brain CCFv3) ap_lim
: bins limits for anterio-posterior in mm ap_nbins
: number of bins for anterio-posterior dv_lim
: bins limits for dorso-ventral in mm dv_nbins
: number of bins for dorso-ventral ml_lim
: bins limits for medio-lateral in mm ml_nbins
: number of bins for medio-lateral hue
: color curves with this parameter, must be \"hemisphere\" or \"channel\" hue_filter
: use only a subset of data
common_norm
: use a global normalization (eg. the sum of areas under all curves is 1). Otherwise, normalize each hue individually
Display parameters
show_injection
: add a patch showing the extent of injection sites, using the corresponding channel colors. Requires the information TOML file to be set up cmap
: matplotlib color map for 2D heatmaps cmap_nbins
: number of bins for 2D heatmaps cmap_lim
: color limits for 2D heatmaps
Distributions per regions parameters
base_measurement
: the name of the measurement in QuPath to derive others from. Usually \"Count\" or \"Length \u00b5m\" hue
: color bars with this parameter, must be \"hemisphere\" or \"channel\" hue_filter
: use only a subset of data
hue_mirror
: plot two hue_filter levels in mirror instead of discarding the others. For example, if hue=channel and hue_filter=\"both\", plots the two hemispheres in mirror. normalize_starter_cells
: normalize non-relative metrics by the number of starter cells
Names of metrics. The keys are used internally in cuisto as-is so they should NOT be modified. The values only change the metrics' names in the output files
\"density \u00b5m^-2\"
: relevant name \"density mm^-2\"
: relevant name \"coverage index\"
: relevant name \"relative measurement\"
: relevant name \"relative density\"
: relevant name
nregions
: number of regions to display (sorted by max.) orientation
: orientation of the bars (\"h\" or \"v\") order
: order the regions by \"ontology\" or by \"max\" (descending values); any other value leaves the regions unsorted dodge
: enforce bars not being stacked log_scale
: use log scale for metrics
name of metrics to display
\"count\"
: real_name = display_name, with real_name being the \"values\" in [regions.metrics] \"density mm^-2\"
Full path to information TOML files and atlas outlines for 2D heatmaps.
blacklist
fusion
outlines
infos
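Assembled into a file, the keys described above give a TOML configuration along the lines of the abridged sketch below. All values are illustrative placeholders, not shipped defaults; refer to the template distributed with cuisto for the authoritative file:

```toml
object_type = "Cells"       # QuPath base classification
segmentation_tag = "cells"  # matches the segmentation directory name

[atlas]
name = "allen_mouse_10um"   # brainglobe-atlasapi atlas name
type = "brain"              # "brain" or "cord"
midline = 5700              # in microns
outline_structures = ["root", "CB"]

[channels.names]
"marker+" = "Positive"
[channels.colors]
"marker+" = "#690085"

[hemispheres.names]
Left = "Contra."
Right = "Ipsi."
[hemispheres.colors]
Left = "#ff516e"
Right = "#960010"

[distributions]
stereo = true
ap_lim = [-8.0, 0.0]
ap_nbins = 75
hue = "channel"
hue_filter = "Ipsi."
common_norm = true
[distributions.display]
show_injection = false
cmap = "OrRd"
cmap_nbins = 50
cmap_lim = [1, 50]

[regions]
base_measurement = "Count"
hue = "hemisphere"
hue_filter = "both"
hue_mirror = false
normalize_starter_cells = false
[regions.metrics]
"density mm^-2" = "density mm^-2"
[regions.display]
nregions = 18
orientation = "h"
order = "max"
dodge = true
log_scale = false
[regions.display.metrics]
"count" = "count"

[files]
blacklist = "/path/to/blacklist.toml"
fusion = "/path/to/fusion.toml"
outlines = "/path/to/outlines.h5"
infos = "/path/to/infos.toml"
```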
config module, part of cuisto.
Contains the Config class.
"},{"location":"api-config.html#cuisto.config.Config","title":"Config(config_file)
","text":"The configuration class.
Reads the input configuration file and provides its constants.
Parameters:
Name Type Description Defaultconfig_file
str
Full path to the configuration file to load.
requiredReturns:
Name Type Descriptioncfg
Config object.
Constructor.
Source code incuisto/config.py
def __init__(self, config_file):\n \"\"\"Constructor.\"\"\"\n with open(config_file, \"rb\") as fid:\n cfg = tomllib.load(fid)\n\n for key in cfg:\n setattr(self, key, cfg[key])\n\n self.config_file = config_file\n self.bg_atlas = BrainGlobeAtlas(self.atlas[\"name\"], check_latest=False)\n self.get_blacklist()\n self.get_leaves_list()\n
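In practice, loading a configuration is a one-liner; the path below is illustrative. Note that instantiating Config also instantiates the BrainGlobe atlas declared in the [atlas] section, which is fetched on first use if needed:

```python
from cuisto.config import Config

cfg = Config("/path/to/config.toml")  # illustrative path

print(cfg.object_type)    # top-level keys become attributes
print(cfg.atlas["name"])  # TOML tables become dictionaries

# seaborn-ready palette for the hue declared in [distributions]
palette = cfg.get_hue_palette("distributions")
```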
"},{"location":"api-config.html#cuisto.config.Config.get_blacklist","title":"get_blacklist()
","text":"Wraps cuisto.utils.get_blacklist.
Source code incuisto/config.py
def get_blacklist(self):\n \"\"\"Wraps cuisto.utils.get_blacklist.\"\"\"\n\n self.atlas[\"blacklist\"] = utils.get_blacklist(\n self.files[\"blacklist\"], self.bg_atlas\n )\n
"},{"location":"api-config.html#cuisto.config.Config.get_hue_palette","title":"get_hue_palette(mode)
","text":"Get color palette given hue.
Maps hue to colors in channels or hemispheres.
Parameters:
Name Type Description Defaultmode
(hemisphere, channel)
\"hemisphere\"
Returns:
Name Type Descriptionpalette
dict
Maps a hue level to a color, usable in seaborn.
Source code incuisto/config.py
def get_hue_palette(self, mode: str) -> dict:\n \"\"\"\n Get color palette given hue.\n\n Maps hue to colors in channels or hemispheres.\n\n Parameters\n ----------\n mode : {\"hemisphere\", \"channel\"}\n\n Returns\n -------\n palette : dict\n Maps a hue level to a color, usable in seaborn.\n\n \"\"\"\n params = getattr(self, mode)\n\n if params[\"hue\"] == \"channel\":\n # replace channels by their new names\n palette = {\n self.channels[\"names\"][k]: v for k, v in self.channels[\"colors\"].items()\n }\n elif params[\"hue\"] == \"hemisphere\":\n # replace hemispheres by their new names\n palette = {\n self.hemispheres[\"names\"][k]: v\n for k, v in self.hemispheres[\"colors\"].items()\n }\n else:\n palette = None\n warnings.warn(f\"hue={self.regions[\"display\"][\"hue\"]} not supported.\")\n\n return palette\n
"},{"location":"api-config.html#cuisto.config.Config.get_injection_sites","title":"get_injection_sites(animals)
","text":"Get list of injection sites coordinates for each animals, for each channels.
Parameters:
Name Type Description Defaultanimals
list of str
List of animals.
requiredReturns:
Name Type Descriptioninjection_sites
dict
{\"x\": {channel0: [x]}, \"y\": {channel1: [y]}}
Source code incuisto/config.py
def get_injection_sites(self, animals: list[str]) -> dict:\n \"\"\"\n Get list of injection sites coordinates for each animals, for each channels.\n\n Parameters\n ----------\n animals : list of str\n List of animals.\n\n Returns\n -------\n injection_sites : dict\n {\"x\": {channel0: [x]}, \"y\": {channel1: [y]}}\n\n \"\"\"\n injection_sites = {\n axis: {channel: [] for channel in self.channels[\"names\"].keys()}\n for axis in [\"x\", \"y\", \"z\"]\n }\n\n for animal in animals:\n for channel in self.channels[\"names\"].keys():\n injx, injy, injz = utils.get_injection_site(\n animal,\n self.files[\"infos\"],\n channel,\n stereo=self.distributions[\"stereo\"],\n )\n if injx is not None:\n injection_sites[\"x\"][channel].append(injx)\n if injy is not None:\n injection_sites[\"y\"][channel].append(injy)\n if injz is not None:\n injection_sites[\"z\"][channel].append(injz)\n\n return injection_sites\n
"},{"location":"api-config.html#cuisto.config.Config.get_leaves_list","title":"get_leaves_list()
","text":"Wraps utils.get_leaves_list.
Source code incuisto/config.py
def get_leaves_list(self):\n \"\"\"Wraps utils.get_leaves_list.\"\"\"\n\n self.atlas[\"leaveslist\"] = utils.get_leaves_list(self.bg_atlas)\n
"},{"location":"api-display.html","title":"cuisto.display","text":"display module, part of cuisto.
Contains display functions, essentially wrapping matplotlib and seaborn functions.
"},{"location":"api-display.html#cuisto.display.add_data_coverage","title":"add_data_coverage(df, ax, colors=None, **kwargs)
","text":"Add lines below the plot to represent data coverage.
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame with X_min
and X_max
on rows for each animals (on columns).
ax
Axes
Handle to axes where to add the patch.
requiredcolors
list or str or None
Colors for the patches, as a RGB list or hex list. Should be the same size as the number of patches to plot, eg. the number of columns in df
. If None, default seaborn colors are used. If only one element, used for each animal.
None
**kwargs
passed to patches.Rectangle()
{}
Returns:
Name Type Descriptionax
Axes
Handle to updated axes.
Source code incuisto/display.py
def add_data_coverage(\n df: pd.DataFrame, ax: plt.Axes, colors: list | str | None = None, **kwargs\n) -> plt.Axes:\n \"\"\"\n Add lines below the plot to represent data coverage.\n\n Parameters\n ----------\n df : pandas.DataFrame\n DataFrame with `X_min` and `X_max` on rows for each animals (on columns).\n ax : Axes\n Handle to axes where to add the patch.\n colors : list or str or None, optional\n Colors for the patches, as a RGB list or hex list. Should be the same size as\n the number of patches to plot, eg. the number of columns in `df`. If None,\n default seaborn colors are used. If only one element, used for each animal.\n **kwargs : passed to patches.Rectangle()\n\n Returns\n -------\n ax : Axes\n Handle to updated axes.\n\n \"\"\"\n # get colors\n ncolumns = len(df.columns)\n if not colors:\n colors = sns.color_palette(n_colors=ncolumns)\n elif isinstance(colors, str) or (isinstance(colors, list) & (len(colors) == 3)):\n colors = [colors] * ncolumns\n elif len(colors) != ncolumns:\n warnings.warn(f\"Wrong number of colors ({len(colors)}), using default colors.\")\n colors = sns.color_palette(n_colors=ncolumns)\n\n # get patch height depending on current axis limits\n ymin, ymax = ax.get_ylim()\n height = (ymax - ymin) * 0.02\n\n for animal, color in zip(df.columns, colors):\n # get patch coordinates\n ymin, ymax = ax.get_ylim()\n ylength = ymax - ymin\n ybottom = ymin - 0.02 * ylength\n xleft = df.loc[\"X_min\", animal]\n xright = df.loc[\"X_max\", animal]\n\n # plot patch\n ax.add_patch(\n patches.Rectangle(\n (xleft, ybottom),\n xright - xleft,\n height,\n label=animal,\n color=color,\n **kwargs,\n )\n )\n\n ax.autoscale(tight=True) # set new axes limits\n\n ax.autoscale() # reset scale\n\n return ax\n
"},{"location":"api-display.html#cuisto.display.add_injection_patch","title":"add_injection_patch(X, ax, **kwargs)
","text":"Add a patch representing the injection sites.
The patch will span from the minimal coordinate to the maximal. If plotted in stereotaxic coordinates, coordinates should be converted beforehand.
Parameters:
Name Type Description DefaultX
list
Coordinates in mm for each animals. Can be empty to not plot anything.
requiredax
Axes
Handle to axes where to add the patch.
required**kwargs
passed to Axes.axvspan
{}
Returns:
Name Type Descriptionax
Axes
Handle to updated Axes.
Source code incuisto/display.py
def add_injection_patch(X: list, ax: plt.Axes, **kwargs) -> plt.Axes:\n \"\"\"\n Add a patch representing the injection sites.\n\n The patch will span from the minimal coordinate to the maximal.\n If plotted in stereotaxic coordinates, coordinates should be converted beforehand.\n\n Parameters\n ----------\n X : list\n Coordinates in mm for each animals. Can be empty to not plot anything.\n ax : Axes\n Handle to axes where to add the patch.\n **kwargs : passed to Axes.axvspan\n\n Returns\n -------\n ax : Axes\n Handle to updated Axes.\n\n \"\"\"\n # plot patch\n if len(X) > 0:\n ax.axvspan(min(X), max(X), **kwargs)\n\n return ax\n
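A minimal, self-contained sketch of its usage; the coordinates are made up:

```python
import matplotlib.pyplot as plt

from cuisto import display

fig, ax = plt.subplots()
ax.plot([-6, -4, -2], [0.1, 0.8, 0.2])  # any 1D distribution

# shade the antero-posterior extent of hypothetical injection sites (in mm)
display.add_injection_patch([-4.2, -3.6, -3.9], ax, color="C1", alpha=0.25)
plt.show()
```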
"},{"location":"api-display.html#cuisto.display.draw_structure_outline","title":"draw_structure_outline(view='sagittal', structures=['root'], outline_file='', ax=None, microns=False, **kwargs)
","text":"Plot brain regions outlines in given projection.
This requires a file containing the structures outlines.
Parameters:
Name Type Description Defaultview
str
Projection, \"sagittal\", \"coronal\" or \"top\". Default is \"sagittal\".
'sagittal'
structures
list[str]
List of structures acronyms whose outlines will be drawn. Default is [\"root\"].
['root']
outline_file
str
Full path the outlines HDF5 file.
''
ax
Axes or None
Axes where to plot the outlines. If None, get current axes (the default).
None
microns
bool
If False (default), converts the coordinates in mm.
False
**kwargs
passed to pyplot.plot()
{}
Returns:
Name Type Descriptionax
Axes
Source code in cuisto/display.py
def draw_structure_outline(\n view: str = \"sagittal\",\n structures: list[str] = [\"root\"],\n outline_file: str = \"\",\n ax: plt.Axes | None = None,\n microns: bool = False,\n **kwargs,\n) -> plt.Axes:\n \"\"\"\n Plot brain regions outlines in given projection.\n\n This requires a file containing the structures outlines.\n\n Parameters\n ----------\n view : str\n Projection, \"sagittal\", \"coronal\" or \"top\". Default is \"sagittal\".\n structures : list[str]\n List of structures acronyms whose outlines will be drawn. Default is [\"root\"].\n outline_file : str\n Full path the outlines HDF5 file.\n ax : plt.Axes or None, optional\n Axes where to plot the outlines. If None, get current axes (the default).\n microns : bool, optional\n If False (default), converts the coordinates in mm.\n **kwargs : passed to pyplot.plot()\n\n Returns\n -------\n ax : plt.Axes\n\n \"\"\"\n # get axes\n if not ax:\n ax = plt.gca()\n\n # get units\n if microns:\n conv = 1\n else:\n conv = 1 / 1000\n\n with h5py.File(outline_file) as f:\n if view == \"sagittal\":\n for structure in structures:\n dsets = f[\"sagittal\"][structure]\n\n for dset in dsets.values():\n ax.plot(dset[:, 0] * conv, dset[:, 1] * conv, **kwargs)\n\n if view == \"coronal\":\n for structure in structures:\n dsets = f[\"coronal\"][structure]\n\n for dset in dsets.values():\n ax.plot(dset[:, 0] * conv, dset[:, 1] * conv, **kwargs)\n\n if view == \"top\":\n for structure in structures:\n dsets = f[\"top\"][structure]\n\n for dset in dsets.values():\n ax.plot(dset[:, 0] * conv, dset[:, 1] * conv, **kwargs)\n\n return ax\n
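For example, assuming an outlines HDF5 file generated beforehand (the path below is a placeholder and the listed structures must exist in that file):

```python
import matplotlib.pyplot as plt

from cuisto import display

fig, ax = plt.subplots()
display.draw_structure_outline(
    view="coronal",
    structures=["root"],
    outline_file="/path/to/outlines.h5",  # placeholder path
    ax=ax,
    linewidth=1.5,
    color="k",
)
plt.show()
```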
"},{"location":"api-display.html#cuisto.display.nice_bar_plot","title":"nice_bar_plot(df, x='', y=[''], hue='', ylabel=[''], orient='h', nx=None, ordering=None, names_list=None, hue_mirror=False, log_scale=False, bar_kws={}, pts_kws={})
","text":"Nice bar plot of per-region objects distribution.
This is used for objects distribution across brain regions. Shows the y
metric (count, areal density, cumulated length...) in each x
category (brain regions). orient
controls whether the bars are shown horizontally (default) or vertically. Input df
must have an additional \"hemisphere\" column. All y
are plotted in the same figure as different subplots. nx
controls the number of displayed regions.
Parameters:
Name Type Description Defaultdf
DataFrame
required x
str
Key in df
.
''
y
str
Key in df
.
''
hue
str
Key in df
.
''
ylabel
list of str
Y axis labels.
['']
orient
h or v
\"h\" for horizontal bars (default) or \"v\" for vertical bars.
'h'
nx
None or int
Number of x
to show in the plot. Default is None (no limit).
None
ordering
None or list[str] or max
Sorted list of acronyms. Data will be sorted following this order; if \"max\", sorted by descending values; if None, not sorted (default).
None
names_list
list or None
List of names to display. If None (default), takes the most prominent overall ones.
None
hue_mirror
bool
If there are 2 groups, plot in mirror. Default is False.
False
log_scale
bool
Set the metrics in log scale. Default is False.
False
bar_kws
dict
Passed to seaborn.barplot().
{}
pts_kws
dict
Passed to seaborn.stripplot().
{}
Returns:
Name Type Descriptionfigs
list
List of figures.
Source code incuisto/display.py
def nice_bar_plot(\n df: pd.DataFrame,\n x: str = \"\",\n y: list[str] = [\"\"],\n hue: str = \"\",\n ylabel: list[str] = [\"\"],\n orient=\"h\",\n nx: None | int = None,\n ordering: None | list[str] | str = None,\n names_list: None | list = None,\n hue_mirror: bool = False,\n log_scale: bool = False,\n bar_kws: dict = {},\n pts_kws: dict = {},\n) -> list[plt.Axes]:\n \"\"\"\n Nice bar plot of per-region objects distribution.\n\n This is used for objects distribution across brain regions. Shows the `y` metric\n (count, aeral density, cumulated length...) in each `x` categories (brain regions).\n `orient` controls wether the bars are shown horizontally (default) or vertically.\n Input `df` must have an additional \"hemisphere\" column. All `y` are plotted in the\n same figure as different subplots. `nx` controls the number of displayed regions.\n\n Parameters\n ----------\n df : pandas.DataFrame\n x, y, hue : str\n Key in `df`.\n ylabel : list of str\n Y axis labels.\n orient : \"h\" or \"v\", optional\n \"h\" for horizontal bars (default) or \"v\" for vertical bars.\n nx : None or int, optional\n Number of `x` to show in the plot. Default is None (no limit).\n ordering : None or list[str] or \"max\", optional\n Sorted list of acronyms. Data will be sorted follwowing this order, if \"max\",\n sorted by descending values, if None, not sorted (default).\n names_list : list or None, optional\n List of names to display. If None (default), takes the most prominent overall\n ones.\n hue_mirror : bool, optional\n If there are 2 groups, plot in mirror. Default is False.\n log_scale : bool, optional\n Set the metrics in log scale. Default is False.\n bar_kws : dict\n Passed to seaborn.barplot().\n pts_kws : dict\n Passed to seaborn.stripplot().\n\n Returns\n -------\n figs : list\n List of figures.\n\n \"\"\"\n figs = []\n # loop for each features\n for yi, ylabeli in zip(y, ylabel):\n # prepare data\n # get nx first most prominent regions\n if not names_list:\n names_list_plt = (\n df.groupby([\"Name\"])[yi].mean().sort_values(ascending=False).index[0:nx]\n )\n else:\n names_list_plt = names_list\n dfplt = df[df[\"Name\"].isin(names_list_plt)] # limit to those regions\n # limit hierarchy list if provided\n if isinstance(ordering, list):\n order = [el for el in ordering if el in names_list_plt]\n elif ordering == \"max\":\n order = names_list_plt\n else:\n order = None\n\n # reorder keys depending on orientation and create axes\n if orient == \"h\":\n xp = yi\n yp = x\n if hue_mirror:\n nrows = 1\n ncols = 2\n sharex = None\n sharey = \"all\"\n else:\n nrows = 1\n ncols = 1\n sharex = None\n sharey = None\n elif orient == \"v\":\n xp = x\n yp = yi\n if hue_mirror:\n nrows = 2\n ncols = 1\n sharex = \"all\"\n sharey = None\n else:\n nrows = 1\n ncols = 1\n sharex = None\n sharey = None\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey)\n\n if hue_mirror:\n # two graphs\n ax1, ax2 = axs\n # determine what will be mirrored\n if hue == \"channel\":\n hue_filter = \"hemisphere\"\n elif hue == \"hemisphere\":\n hue_filter = \"channel\"\n # select the two types (should be left/right or two channels)\n hue_filters = dfplt[hue_filter].unique()[0:2]\n hue_filters.sort() # make sure it will be always in the same order\n\n # plot\n for filt, ax in zip(hue_filters, [ax1, ax2]):\n dfplt2 = dfplt[dfplt[hue_filter] == filt]\n ax = sns.barplot(\n dfplt2,\n x=xp,\n y=yp,\n hue=hue,\n estimator=\"mean\",\n errorbar=\"se\",\n orient=orient,\n order=order,\n ax=ax,\n **bar_kws,\n )\n # add 
points\n ax = sns.stripplot(\n dfplt2, x=xp, y=yp, hue=hue, legend=False, ax=ax, **pts_kws\n )\n\n # cosmetics\n if orient == \"h\":\n ax.set_title(f\"{hue_filter}: {filt}\")\n ax.set_ylabel(None)\n ax.set_ylim((nx + 0.5, -0.5))\n if log_scale:\n ax.set_xscale(\"log\")\n\n elif orient == \"v\":\n if ax == ax1:\n # top title\n ax1.set_title(f\"{hue_filter}: {filt}\")\n ax.set_xlabel(None)\n elif ax == ax2:\n # use xlabel as bottom title\n ax2.set_xlabel(\n f\"{hue_filter}: {filt}\", fontsize=ax1.title.get_fontsize()\n )\n ax.set_xlim((-0.5, nx + 0.5))\n if log_scale:\n ax.set_yscale(\"log\")\n\n for label in ax.get_xticklabels():\n label.set_verticalalignment(\"center\")\n label.set_horizontalalignment(\"center\")\n\n # tune axes cosmetics\n if orient == \"h\":\n ax1.set_xlabel(ylabeli)\n ax2.set_xlabel(ylabeli)\n ax1.set_xlim(\n ax1.get_xlim()[0], max((ax1.get_xlim()[1], ax2.get_xlim()[1]))\n )\n ax2.set_xlim(\n ax2.get_xlim()[0], max((ax1.get_xlim()[1], ax2.get_xlim()[1]))\n )\n ax1.invert_xaxis()\n sns.despine(ax=ax1, left=True, top=True, right=False, bottom=False)\n sns.despine(ax=ax2, left=False, top=True, right=True, bottom=False)\n ax1.yaxis.tick_right()\n ax1.tick_params(axis=\"y\", pad=20)\n for label in ax1.get_yticklabels():\n label.set_verticalalignment(\"center\")\n label.set_horizontalalignment(\"center\")\n elif orient == \"v\":\n ax2.set_ylabel(ylabeli)\n ax1.set_ylim(\n ax1.get_ylim()[0], max((ax1.get_ylim()[1], ax2.get_ylim()[1]))\n )\n ax2.set_ylim(\n ax2.get_ylim()[0], max((ax1.get_ylim()[1], ax2.get_ylim()[1]))\n )\n ax2.invert_yaxis()\n sns.despine(ax=ax1, left=False, top=True, right=True, bottom=False)\n sns.despine(ax=ax2, left=False, top=False, right=True, bottom=True)\n for label in ax2.get_xticklabels():\n label.set_verticalalignment(\"center\")\n label.set_horizontalalignment(\"center\")\n ax2.tick_params(axis=\"x\", labelrotation=90, pad=20)\n\n else:\n # one graph\n ax = axs\n # plot\n ax = sns.barplot(\n dfplt,\n x=xp,\n y=yp,\n hue=hue,\n estimator=\"mean\",\n errorbar=\"se\",\n orient=orient,\n order=order,\n ax=ax,\n **bar_kws,\n )\n # add points\n ax = sns.stripplot(\n dfplt, x=xp, y=yp, hue=hue, legend=False, ax=ax, **pts_kws\n )\n\n # cosmetics\n if orient == \"h\":\n ax.set_xlabel(ylabeli)\n ax.set_ylabel(None)\n ax.set_ylim((nx + 0.5, -0.5))\n if log_scale:\n ax.set_xscale(\"log\")\n elif orient == \"v\":\n ax.set_xlabel(None)\n ax.set_ylabel(ylabeli)\n ax.set_xlim((-0.5, nx + 0.5))\n if log_scale:\n ax.set_yscale(\"log\")\n\n fig.tight_layout(pad=0)\n figs.append(fig)\n\n return figs\n
"},{"location":"api-display.html#cuisto.display.nice_distribution_plot","title":"nice_distribution_plot(df, x='', y='', hue=None, xlabel='', ylabel='', injections_sites={}, channel_colors={}, channel_names={}, ax=None, **kwargs)
","text":"Nice plot of 1D distribution of objects.
Parameters:
Name Type Description Defaultdf
DataFrame
required x
str
Keys in df
.
''
y
str
Keys in df
.
''
hue
str or None
Key in df
. If None, no hue is used.
None
xlabel
str
X and Y axes labels.
''
ylabel
str
X and Y axes labels.
''
injections_sites
dict
List of injection sites 1D coordinates in a dict with the channel name as key. If empty, injection site is not plotted (default).
{}
channel_colors
dict
Required if injections_sites is not empty, dict mapping channel names to a color.
{}
channel_names
dict
Required if injections_sites is not empty, dict mapping channel names to a display name.
{}
ax
Axes or None
Axes in which to plot the figure, if None, a new figure is created (default).
None
**kwargs
passed to seaborn.lineplot()
{}
Returns:
Name Type Descriptionax
matplotlib axes
Handle to axes.
Source code incuisto/display.py
def nice_distribution_plot(\n df: pd.DataFrame,\n x: str = \"\",\n y: str = \"\",\n hue: str | None = None,\n xlabel: str = \"\",\n ylabel: str = \"\",\n injections_sites: dict = {},\n channel_colors: dict = {},\n channel_names: dict = {},\n ax: plt.Axes | None = None,\n **kwargs,\n) -> plt.Axes:\n \"\"\"\n Nice plot of 1D distribution of objects.\n\n Parameters\n ----------\n df : pandas.DataFrame\n x, y : str\n Keys in `df`.\n hue : str or None, optional\n Key in `df`. If None, no hue is used.\n xlabel, ylabel : str\n X and Y axes labels.\n injections_sites : dict, optional\n List of injection sites 1D coordinates in a dict with the channel name as key.\n If empty, injection site is not plotted (default).\n channel_colors : dict, optional\n Required if injections_sites is not empty, dict mapping channel names to a\n color.\n channel_names : dict, optional\n Required if injections_sites is not empty, dict mapping channel names to a\n display name.\n ax : Axes or None, optional\n Axes in which to plot the figure, if None, a new figure is created (default).\n **kwargs : passed to seaborn.lineplot()\n\n Returns\n -------\n ax : matplotlib axes\n Handle to axes.\n\n \"\"\"\n if not ax:\n # create figure\n _, ax = plt.subplots(figsize=(10, 6))\n\n ax = sns.lineplot(\n df,\n x=x,\n y=y,\n hue=hue,\n estimator=\"mean\",\n errorbar=\"se\",\n ax=ax,\n **kwargs,\n )\n\n for channel in injections_sites.keys():\n ax = add_injection_patch(\n injections_sites[channel],\n ax,\n color=channel_colors[channel],\n edgecolor=None,\n alpha=0.25,\n label=channel_names[channel] + \": inj. site\",\n )\n\n ax.legend()\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n return ax\n
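For instance, with a made-up binned distribution; the column names follow what cuisto.compute.get_distribution produces:

```python
import numpy as np
import pandas as pd

from cuisto import display

bins = np.linspace(-8, 0, 75)
df = pd.DataFrame(
    {
        "bins": bins,
        "distribution": np.exp(-0.5 * (bins + 4) ** 2),  # dummy data
        "channel": "Positive",
    }
)

ax = display.nice_distribution_plot(
    df,
    x="bins",
    y="distribution",
    hue="channel",
    xlabel="Rostro-caudal position (mm)",
    ylabel="normalized distribution",
    linewidth=2,
)
```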
"},{"location":"api-display.html#cuisto.display.nice_heatmap","title":"nice_heatmap(df, animals, x='', y='', xlabel='', ylabel='', invertx=False, inverty=False, **kwargs)
","text":"Nice plots of 2D distribution of boutons as a heatmap per animal.
Parameters:
Name Type Description Defaultdf
DataFrame
required animals
list-like of str
List of animals.
requiredx
str
Keys in df
.
''
y
str
Keys in df
.
''
xlabel
str
Labels of x and y axes.
''
ylabel
str
Labels of x and y axes.
''
invertx
bool
Whether to invert the x or y axes. Default is False.
False
inverty
bool
Whether to invert the x or y axes. Default is False.
False
**kwargs
passed to seaborn.histplot()
{}
Returns:
Name Type Descriptionax
Axes or list of Axes
Handle to axes.
Source code incuisto/display.py
def nice_heatmap(\n df: pd.DataFrame,\n animals: tuple[str] | list[str],\n x: str = \"\",\n y: str = \"\",\n xlabel: str = \"\",\n ylabel: str = \"\",\n invertx: bool = False,\n inverty: bool = False,\n **kwargs,\n) -> list[plt.Axes] | plt.Axes:\n \"\"\"\n Nice plots of 2D distribution of boutons as a heatmap per animal.\n\n Parameters\n ----------\n df : pandas.DataFrame\n animals : list-like of str\n List of animals.\n x, y : str\n Keys in `df`.\n xlabel, ylabel : str\n Labels of x and y axes.\n invertx, inverty : bool, optional\n Wether to inverse the x or y axes. Default is False.\n **kwargs : passed to seaborn.histplot()\n\n Returns\n -------\n ax : Axes or list of Axes\n Handle to axes.\n\n \"\"\"\n\n # 2D distribution, per animal\n _, axs = plt.subplots(len(animals), 1, sharex=\"all\")\n\n for animal, ax in zip(animals, axs):\n ax = sns.histplot(\n df[df[\"animal\"] == animal],\n x=x,\n y=y,\n ax=ax,\n **kwargs,\n )\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(animal)\n\n if inverty:\n ax.invert_yaxis()\n\n if invertx:\n axs[-1].invert_xaxis() # only once since all x axes are shared\n\n return axs\n
"},{"location":"api-display.html#cuisto.display.nice_joint_plot","title":"nice_joint_plot(df, x='', y='', xlabel='', ylabel='', invertx=False, inverty=False, outline_kws={}, ax=None, **kwargs)
","text":"Joint distribution.
Used to display a 2D heatmap of objects. This is more qualitative than quantitative, for display purposes.
Parameters:
Name Type Description Defaultdf
DataFrame
required x
str
Keys in df
.
''
y
str
Keys in df
.
''
xlabel
str
Label of x and y axes.
''
ylabel
str
Label of x and y axes.
''
invertx
bool
Whether to invert the x or y axes. Default is False for both.
False
inverty
bool
Whether to invert the x or y axes. Default is False for both.
False
outline_kws
dict
Passed to draw_structure_outline().
{}
ax
Axes or None
Axes to plot in. If None, draws in current axes (default).
None
**kwargs
Passed to seaborn.histplot.
{}
Returns:
Name Type Descriptionax
Axes
Source code in cuisto/display.py
def nice_joint_plot(\n df: pd.DataFrame,\n x: str = \"\",\n y: str = \"\",\n xlabel: str = \"\",\n ylabel: str = \"\",\n invertx: bool = False,\n inverty: bool = False,\n outline_kws: dict = {},\n ax: plt.Axes | None = None,\n **kwargs,\n) -> plt.Figure:\n \"\"\"\n Joint distribution.\n\n Used to display a 2D heatmap of objects. This is more qualitative than quantitative,\n for display purposes.\n\n Parameters\n ----------\n df : pandas.DataFrame\n x, y : str\n Keys in `df`.\n xlabel, ylabel : str\n Label of x and y axes.\n invertx, inverty : bool, optional\n Whether to inverse the x or y axes. Default is False for both.\n outline_kws : dict\n Passed to draw_structure_outline().\n ax : plt.Axes or None, optional\n Axes to plot in. If None, draws in current axes (default).\n **kwargs\n Passed to seaborn.histplot.\n\n Returns\n -------\n ax : plt.Axes\n\n \"\"\"\n if not ax:\n ax = plt.gca()\n\n # plot outline\n draw_structure_outline(ax=ax, **outline_kws)\n\n # plot joint distribution\n sns.histplot(\n df,\n x=x,\n y=y,\n ax=ax,\n **kwargs,\n )\n\n # adjust axes\n if invertx:\n ax.invert_xaxis()\n if inverty:\n ax.invert_yaxis()\n\n # labels\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n return ax\n
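A sketch with random coordinates; the outlines file is required by draw_structure_outline(), so the placeholder path below must point to an existing file:

```python
import numpy as np
import pandas as pd

from cuisto import display

rng = np.random.default_rng(0)
df = pd.DataFrame(
    {
        "Atlas_X": rng.normal(-5.0, 1.0, 500),  # rostro-caudal, mm
        "Atlas_Y": rng.normal(4.0, 0.5, 500),   # dorso-ventral, mm
    }
)

ax = display.nice_joint_plot(
    df,
    x="Atlas_X",
    y="Atlas_Y",
    xlabel="Rostro-caudal (mm)",
    ylabel="Dorso-ventral (mm)",
    outline_kws=dict(
        view="sagittal",
        structures=["root"],
        outline_file="/path/to/outlines.h5",  # placeholder path
    ),
    bins=50,
    cmap="OrRd",
)
```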
"},{"location":"api-display.html#cuisto.display.plot_1D_distributions","title":"plot_1D_distributions(dfs_distributions, cfg, df_coordinates=None)
","text":"Wraps nice_distribution_plot().
Source code incuisto/display.py
def plot_1D_distributions(\n dfs_distributions: list[pd.DataFrame],\n cfg,\n df_coordinates: pd.DataFrame = None,\n):\n \"\"\"\n Wraps nice_distribution_plot().\n \"\"\"\n # prepare figures\n fig, axs_dist = plt.subplots(1, 3, sharey=True, figsize=(13, 6))\n xlabels = [\n \"Rostro-caudal position (mm)\",\n \"Dorso-ventral position (mm)\",\n \"Medio-lateral position (mm)\",\n ]\n\n # get animals\n animals = []\n for df in dfs_distributions:\n animals.extend(df[\"animal\"].unique())\n animals = set(animals)\n\n # get injection sites\n if cfg.distributions[\"display\"][\"show_injection\"]:\n injection_sites = cfg.get_injection_sites(animals)\n else:\n injection_sites = {k: {} for k in range(3)}\n\n # get color palette based on hue\n hue = cfg.distributions[\"hue\"]\n palette = cfg.get_hue_palette(\"distributions\")\n\n # loop through each axis\n for df_dist, ax_dist, xlabel, inj_sites in zip(\n dfs_distributions, axs_dist, xlabels, injection_sites.values()\n ):\n # select data\n if cfg.distributions[\"hue\"] == \"hemisphere\":\n dfplt = df_dist[df_dist[\"hemisphere\"] != \"both\"]\n elif cfg.distributions[\"hue\"] == \"channel\":\n dfplt = df_dist[df_dist[\"channel\"] != \"all\"]\n\n # plot\n ax_dist = nice_distribution_plot(\n dfplt,\n x=\"bins\",\n y=\"distribution\",\n hue=hue,\n xlabel=xlabel,\n ylabel=\"normalized distribution\",\n injections_sites=inj_sites,\n channel_colors=cfg.channels[\"colors\"],\n channel_names=cfg.channels[\"names\"],\n linewidth=2,\n palette=palette,\n ax=ax_dist,\n )\n\n # add data coverage\n if (\"Atlas_AP\" in df_dist[\"axis\"].unique()) & (df_coordinates is not None):\n df_coverage = utils.get_data_coverage(df_coordinates)\n ax_dist = add_data_coverage(df_coverage, ax_dist, edgecolor=None, alpha=0.5)\n ax_dist.legend()\n else:\n ax_dist.legend().remove()\n\n # - Distributions, per animal\n if len(animals) > 1:\n _, axs_dist = plt.subplots(1, 3, sharey=True)\n\n # loop through each axis\n for df_dist, ax_dist, xlabel, inj_sites in zip(\n dfs_distributions, axs_dist, xlabels, injection_sites.values()\n ):\n # select data\n df_dist_plot = df_dist[df_dist[\"hemisphere\"] == \"both\"]\n\n # plot\n ax_dist = nice_distribution_plot(\n df_dist_plot,\n x=\"bins\",\n y=\"distribution\",\n hue=\"animal\",\n xlabel=xlabel,\n ylabel=\"normalized distribution\",\n injections_sites=inj_sites,\n channel_colors=cfg.channels[\"colors\"],\n channel_names=cfg.channels[\"names\"],\n linewidth=2,\n ax=ax_dist,\n )\n\n return fig\n
"},{"location":"api-display.html#cuisto.display.plot_2D_distributions","title":"plot_2D_distributions(df, cfg)
","text":"Wraps nice_joint_plot().
Source code incuisto/display.py
def plot_2D_distributions(df: pd.DataFrame, cfg):\n \"\"\"\n Wraps nice_joint_plot().\n \"\"\"\n # -- 2D heatmap, all animals pooled\n # prepare figure\n fig_heatmap = plt.figure(figsize=(12, 9))\n\n ax_sag = fig_heatmap.add_subplot(2, 2, 1)\n ax_cor = fig_heatmap.add_subplot(2, 2, 2, sharey=ax_sag)\n ax_top = fig_heatmap.add_subplot(2, 2, 3, sharex=ax_sag)\n ax_cbar = fig_heatmap.add_subplot(2, 2, 4, box_aspect=15)\n\n # prepare options\n map_options = dict(\n bins=cfg.distributions[\"display\"][\"cmap_nbins\"],\n cmap=cfg.distributions[\"display\"][\"cmap\"],\n rasterized=True,\n thresh=10,\n stat=\"count\",\n vmin=cfg.distributions[\"display\"][\"cmap_lim\"][0],\n vmax=cfg.distributions[\"display\"][\"cmap_lim\"][1],\n )\n outline_kws = dict(\n structures=cfg.atlas[\"outline_structures\"],\n outline_file=cfg.files[\"outlines\"],\n linewidth=1.5,\n color=\"k\",\n )\n cbar_kws = dict(label=\"count\")\n\n # determine which axes are going to be inverted\n if cfg.atlas[\"type\"] == \"brain\":\n cor_invertx = True\n cor_inverty = False\n top_invertx = True\n top_inverty = False\n elif cfg.atlas[\"type\"] == \"cord\":\n cor_invertx = False\n cor_inverty = False\n top_invertx = True\n top_inverty = True\n\n # - sagittal\n # no need to invert axes because they are shared with the two other views\n outline_kws[\"view\"] = \"sagittal\"\n nice_joint_plot(\n df,\n x=\"Atlas_X\",\n y=\"Atlas_Y\",\n xlabel=\"Rostro-caudal (mm)\",\n ylabel=\"Dorso-ventral (mm)\",\n outline_kws=outline_kws,\n ax=ax_sag,\n **map_options,\n )\n\n # - coronal\n outline_kws[\"view\"] = \"coronal\"\n nice_joint_plot(\n df,\n x=\"Atlas_Z\",\n y=\"Atlas_Y\",\n xlabel=\"Medio-lateral (mm)\",\n ylabel=\"Dorso-ventral (mm)\",\n invertx=cor_invertx,\n inverty=cor_inverty,\n outline_kws=outline_kws,\n ax=ax_cor,\n **map_options,\n )\n ax_cor.invert_yaxis()\n\n # - top\n outline_kws[\"view\"] = \"top\"\n nice_joint_plot(\n df,\n x=\"Atlas_X\",\n y=\"Atlas_Z\",\n xlabel=\"Rostro-caudal (mm)\",\n ylabel=\"Medio-lateral (mm)\",\n invertx=top_invertx,\n inverty=top_inverty,\n outline_kws=outline_kws,\n ax=ax_top,\n cbar=True,\n cbar_ax=ax_cbar,\n cbar_kws=cbar_kws,\n **map_options,\n )\n fig_heatmap.suptitle(\"sagittal, coronal and top-view projections\")\n\n # -- 2D heatmap per animals\n # get animals\n animals = df[\"animal\"].unique()\n if len(animals) > 1:\n # Rostro-caudal, dorso-ventral (sagittal)\n _ = nice_heatmap(\n df,\n animals,\n x=\"Atlas_X\",\n y=\"Atlas_Y\",\n xlabel=\"Rostro-caudal (mm)\",\n ylabel=\"Dorso-ventral (mm)\",\n invertx=True,\n inverty=True,\n cmap=\"OrRd\",\n rasterized=True,\n cbar=True,\n )\n\n # Medio-lateral, dorso-ventral (coronal)\n _ = nice_heatmap(\n df,\n animals,\n x=\"Atlas_Z\",\n y=\"Atlas_Y\",\n xlabel=\"Medio-lateral (mm)\",\n ylabel=\"Dorso-ventral (mm)\",\n inverty=True,\n invertx=True,\n cmap=\"OrRd\",\n rasterized=True,\n )\n\n return fig_heatmap\n
"},{"location":"api-display.html#cuisto.display.plot_regions","title":"plot_regions(df, cfg, **kwargs)
","text":"Wraps nice_bar_plot().
Source code incuisto/display.py
def plot_regions(df: pd.DataFrame, cfg, **kwargs):\n \"\"\"\n Wraps nice_bar_plot().\n \"\"\"\n # get regions order\n if cfg.regions[\"display\"][\"order\"] == \"ontology\":\n regions_order = [d[\"acronym\"] for d in cfg.bg_atlas.structures_list]\n elif cfg.regions[\"display\"][\"order\"] == \"max\":\n regions_order = \"max\"\n else:\n regions_order = None\n\n # determine metrics to be plotted and color palette based on hue\n metrics = [*cfg.regions[\"display\"][\"metrics\"].keys()]\n hue = cfg.regions[\"hue\"]\n palette = cfg.get_hue_palette(\"regions\")\n\n # select data\n dfplt = utils.select_hemisphere_channel(\n df, hue, cfg.regions[\"hue_filter\"], cfg.regions[\"hue_mirror\"]\n )\n\n # prepare options\n bar_kws = dict(\n err_kws={\"linewidth\": 1.5},\n dodge=cfg.regions[\"display\"][\"dodge\"],\n palette=palette,\n )\n pts_kws = dict(\n size=4,\n edgecolor=\"auto\",\n linewidth=0.75,\n dodge=cfg.regions[\"display\"][\"dodge\"],\n palette=palette,\n )\n # draw\n figs = nice_bar_plot(\n dfplt,\n x=\"Name\",\n y=metrics,\n hue=hue,\n ylabel=[*cfg.regions[\"display\"][\"metrics\"].values()],\n orient=cfg.regions[\"display\"][\"orientation\"],\n nx=cfg.regions[\"display\"][\"nregions\"],\n ordering=regions_order,\n hue_mirror=cfg.regions[\"hue_mirror\"],\n log_scale=cfg.regions[\"display\"][\"log_scale\"],\n bar_kws=bar_kws,\n pts_kws=pts_kws,\n **kwargs,\n )\n\n return figs\n
"},{"location":"api-io.html","title":"cuisto.io","text":"io module, part of cuisto.
Contains loading and saving functions.
"},{"location":"api-io.html#cuisto.io.cat_csv_dir","title":"cat_csv_dir(directory, **kwargs)
","text":"Scans a directory for csv files and concatenate them into a single DataFrame.
Parameters:
Name Type Description Defaultdirectory
str
Path to the directory to scan.
required**kwargs
passed to pandas.read_csv()
{}
Returns:
Name Type Descriptiondf
DataFrame
All CSV files concatenated in a single DataFrame.
Source code incuisto/io.py
def cat_csv_dir(directory, **kwargs) -> pd.DataFrame:\n \"\"\"\n Scans a directory for csv files and concatenate them into a single DataFrame.\n\n Parameters\n ----------\n directory : str\n Path to the directory to scan.\n **kwargs : passed to pandas.read_csv()\n\n Returns\n -------\n df : pandas.DataFrame\n All CSV files concatenated in a single DataFrame.\n\n \"\"\"\n return pd.concat(\n pd.read_csv(\n os.path.join(directory, filename),\n **kwargs,\n )\n for filename in os.listdir(directory)\n if (filename.endswith(\".csv\"))\n and not check_empty_file(os.path.join(directory, filename), threshold=1)\n )\n
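For example, to concatenate the tab-separated measurement tables exported from QuPath (the directory path is illustrative):

```python
from cuisto import io

df = io.cat_csv_dir(
    "/path/to/annotations",  # directory containing the exported files
    index_col="Object ID",
    sep="\t",
)
```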
"},{"location":"api-io.html#cuisto.io.cat_data_dir","title":"cat_data_dir(directory, segtype, **kwargs)
","text":"Wraps either cat_csv_dir() or cat_json_dir() depending on segtype
.
Parameters:
Name Type Description Defaultdirectory
str
Path to the directory to scan.
requiredsegtype
str
\"synaptophysin\" or \"fibers\".
required**kwargs
passed to cat_csv_dir() or cat_json_dir().
{}
Returns:
Name Type Descriptiondf
DataFrame
All files concatenated in a single DataFrame.
Source code incuisto/io.py
def cat_data_dir(directory: str, segtype: str, **kwargs) -> pd.DataFrame:\n \"\"\"\n Wraps either cat_csv_dir() or cat_json_dir() depending on `segtype`.\n\n Parameters\n ----------\n directory : str\n Path to the directory to scan.\n segtype : str\n \"synaptophysin\" or \"fibers\".\n **kwargs : passed to cat_csv_dir() or cat_json_dir().\n\n Returns\n -------\n df : pd.DataFrame\n All files concatenated in a single DataFrame.\n\n \"\"\"\n if segtype in CSV_KW:\n # remove kwargs for json\n kwargs.pop(\"hemisphere_names\", None)\n kwargs.pop(\"atlas\", None)\n return cat_csv_dir(directory, **kwargs)\n elif segtype in JSON_KW:\n kwargs = {k: kwargs[k] for k in [\"hemisphere_names\", \"atlas\"] if k in kwargs}\n return cat_json_dir(directory, **kwargs)\n else:\n raise ValueError(\n f\"'{segtype}' not supported, unable to determine if CSV or JSON.\"\n )\n
"},{"location":"api-io.html#cuisto.io.cat_json_dir","title":"cat_json_dir(directory, hemisphere_names, atlas)
","text":"Scans a directory for json files and concatenate them in a single DataFrame.
The json files must be generated with 'workflow_import_export.groovy\" from a QuPath project.
Parameters:
Name Type Description Defaultdirectory
str
Path to the directory to scan.
requiredhemisphere_names
dict
Maps between hemisphere names in the json files (\"Right\" and \"Left\") to something else (eg. \"Ipsi.\" and \"Contra.\").
requiredatlas
BrainGlobeAtlas
Atlas to read regions from.
requiredReturns:
Name Type Descriptiondf
DataFrame
All JSON files concatenated in a single DataFrame.
Source code incuisto/io.py
def cat_json_dir(\n directory: str, hemisphere_names: dict, atlas: BrainGlobeAtlas\n) -> pd.DataFrame:\n \"\"\"\n Scans a directory for json files and concatenate them in a single DataFrame.\n\n The json files must be generated with 'workflow_import_export.groovy\" from a QuPath\n project.\n\n Parameters\n ----------\n directory : str\n Path to the directory to scan.\n hemisphere_names : dict\n Maps between hemisphere names in the json files (\"Right\" and \"Left\") to\n something else (eg. \"Ipsi.\" and \"Contra.\").\n atlas : BrainGlobeAtlas\n Atlas to read regions from.\n\n Returns\n -------\n df : pd.DataFrame\n All JSON files concatenated in a single DataFrame.\n\n \"\"\"\n # list files\n files_list = [\n os.path.join(directory, filename)\n for filename in os.listdir(directory)\n if (filename.endswith(\".json\"))\n ]\n\n data = [] # prepare list of DataFrame\n for filename in files_list:\n with open(filename, \"rb\") as fid:\n df = pd.DataFrame.from_dict(\n orjson.loads(fid.read())[\"paths\"], orient=\"index\"\n )\n df[\"Image\"] = os.path.basename(filename).split(\"_detections\")[0]\n data.append(df)\n\n df = (\n pd.concat(data)\n .explode(\n [\"x\", \"y\", \"z\", \"hemisphere\"]\n ) # get an entry for each point of segments\n .reset_index()\n .rename(\n columns=dict(\n x=\"Atlas_X\",\n y=\"Atlas_Y\",\n z=\"Atlas_Z\",\n index=\"Object ID\",\n classification=\"Classification\",\n )\n )\n .set_index(\"Object ID\")\n )\n\n # change hemisphere names\n df[\"hemisphere\"] = df[\"hemisphere\"].map(hemisphere_names)\n\n # add object type\n df[\"Object type\"] = \"Detection\"\n\n # add brain regions\n df = utils.add_brain_region(df, atlas, col=\"Parent\")\n\n return df\n
"},{"location":"api-io.html#cuisto.io.check_empty_file","title":"check_empty_file(filename, threshold=1)
","text":"Checks if a file is empty.
Empty is defined as a file whose number of lines is lower than or equal to threshold
(to allow for headers).
Parameters:
Name Type Description Defaultfilename
str
Full path to the file to check.
requiredthreshold
int
If number of lines is lower than or equal to this value, it is considered as empty. Default is 1.
1
Returns:
Name Type Descriptionempty
bool
True if the file is empty as defined above.
Source code incuisto/io.py
def check_empty_file(filename: str, threshold: int = 1) -> bool:\n \"\"\"\n Checks if a file is empty.\n\n Empty is defined as a file whose number of lines is lower than or equal to\n `threshold` (to allow for headers).\n\n Parameters\n ----------\n filename : str\n Full path to the file to check.\n threshold : int, optional\n If number of lines is lower than or equal to this value, it is considered as\n empty. Default is 1.\n\n Returns\n -------\n empty : bool\n True if the file is empty as defined above.\n\n \"\"\"\n with open(filename, \"rb\") as fid:\n nlines = sum(1 for _ in fid)\n\n if nlines <= threshold:\n return True\n else:\n return False\n
"},{"location":"api-io.html#cuisto.io.get_measurements_directory","title":"get_measurements_directory(wdir, animal, kind, segtype)
","text":"Get the directory with detections or annotations measurements for given animal ID.
Parameters:
Name Type Description Defaultwdir
str
Base working directory.
requiredanimal
str
Animal ID.
requiredkind
str
\"annotation\" or \"detection\".
requiredsegtype
str
Type of segmentation, eg. \"synaptophysin\".
requiredReturns:
Name Type Descriptiondirectory
str
Path to detections or annotations directory.
Source code incuisto/io.py
def get_measurements_directory(wdir, animal: str, kind: str, segtype: str) -> str:\n \"\"\"\n Get the directory with detections or annotations measurements for given animal ID.\n\n Parameters\n ----------\n wdir : str\n Base working directory.\n animal : str\n Animal ID.\n kind : str\n \"annotation\" or \"detection\".\n segtype : str\n Type of segmentation, eg. \"synaptophysin\".\n\n Returns\n -------\n directory : str\n Path to detections or annotations directory.\n\n \"\"\"\n bdir = os.path.join(wdir, animal, animal.lower() + \"_segmentation\", segtype)\n\n if (kind == \"detection\") or (kind == \"detections\"):\n return os.path.join(bdir, \"detections\")\n elif (kind == \"annotation\") or (kind == \"annotations\"):\n return os.path.join(bdir, \"annotations\")\n else:\n raise ValueError(\n f\"kind = '{kind}' not supported. Choose 'detection' or 'annotation'.\"\n )\n
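Given the directory layout encoded above, a call resolves as follows (paths are illustrative):

```python
from cuisto import io

directory = io.get_measurements_directory(
    "/path/to/wdir", animal="animalid0", kind="detection", segtype="synaptophysin"
)
# directory == "/path/to/wdir/animalid0/animalid0_segmentation/synaptophysin/detections"
```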
"},{"location":"api-io.html#cuisto.io.load_dfs","title":"load_dfs(filepath, fmt, identifiers=['df_regions', 'df_coordinates', 'df_distribution_ap', 'df_distribution_dv', 'df_distribution_ml'])
","text":"Load DataFrames from file.
If fmt
is \"h5\" (\"xslx\"), identifiers are interpreted as h5 group identifier (sheet name, respectively). If fmt
is \"pickle\", \"csv\" or \"tsv\", identifiers are appended to filename
. Path to the file can't have a dot (\".\") in it.
Parameters:
Name Type Description Defaultfilepath
str
Full path to the file(s), without extension.
requiredfmt
(h5, csv, pickle, xlsx)
File(s) format.
\"h5\"
identifiers
list of str
List of identifiers to load from files. Defaults to the ones saved in cuisto.process.process_animals().
['df_regions', 'df_coordinates', 'df_distribution_ap', 'df_distribution_dv', 'df_distribution_ml']
Returns:
Type DescriptionAll requested DataFrames.
Source code in cuisto/io.py
def load_dfs(\n filepath: str,\n fmt: str,\n identifiers: list[str] = [\n \"df_regions\",\n \"df_coordinates\",\n \"df_distribution_ap\",\n \"df_distribution_dv\",\n \"df_distribution_ml\",\n ],\n):\n \"\"\"\n Load DataFrames from file.\n\n If `fmt` is \"h5\" (\"xslx\"), identifiers are interpreted as h5 group identifier (sheet\n name, respectively).\n If `fmt` is \"pickle\", \"csv\" or \"tsv\", identifiers are appended to `filename`.\n Path to the file can't have a dot (\".\") in it.\n\n Parameters\n ----------\n filepath : str\n Full path to the file(s), without extension.\n fmt : {\"h5\", \"csv\", \"pickle\", \"xlsx\"}\n File(s) format.\n identifiers : list of str, optional\n List of identifiers to load from files. Defaults to the ones saved in\n cuisto.process.process_animals().\n\n Returns\n -------\n All requested DataFrames.\n\n \"\"\"\n # ensure filename without extension\n base_path = os.path.splitext(filepath)[0]\n full_path = base_path + \".\" + fmt\n\n res = []\n if (fmt == \"h5\") or (fmt == \"hdf\") or (fmt == \"hdf5\"):\n for identifier in identifiers:\n res.append(pd.read_hdf(full_path, identifier))\n elif fmt == \"xlsx\":\n for identifier in identifiers:\n res.append(pd.read_excel(full_path, sheet_name=identifier))\n else:\n for identifier in identifiers:\n id_path = f\"{base_path}_{identifier}.{fmt}\"\n if (fmt == \"pickle\") or (fmt == \"pkl\"):\n res.append(pd.read_pickle(id_path))\n elif fmt == \"csv\":\n res.append(pd.read_csv(id_path))\n elif fmt == \"tsv\":\n res.append(pd.read_csv(id_path, sep=\"\\t\"))\n else:\n raise ValueError(f\"{fmt} is not supported.\")\n\n return res\n
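For instance, to reload two of the DataFrames saved by cuisto.process.process_animals(), assuming they were written as HDF5 (the path is illustrative and given without extension):

```python
from cuisto import io

df_regions, df_coordinates = io.load_dfs(
    "/path/to/wdir/quantification/results",
    "h5",
    identifiers=["df_regions", "df_coordinates"],
)
```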
"},{"location":"api-io.html#cuisto.io.save_dfs","title":"save_dfs(out_dir, filename, dfs)
","text":"Save DataFrames to file.
File format is inferred from file name extension.
Parameters:
Name Type Description Defaultout_dir
str
Output directory.
requiredfilename
_type_
File name.
requireddfs
dict
DataFrames to save, as {identifier: df}. If HDF5 or xlsx, all df are saved in the same file, otherwise identifier is appended to the file name.
required Source code incuisto/io.py
def save_dfs(out_dir: str, filename, dfs: dict):\n \"\"\"\n Save DataFrames to file.\n\n File format is inferred from file name extension.\n\n Parameters\n ----------\n out_dir : str\n Output directory.\n filename : _type_\n File name.\n dfs : dict\n DataFrames to save, as {identifier: df}. If HDF5 or xlsx, all df are saved in\n the same file, otherwise identifier is appended to the file name.\n\n \"\"\"\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n basename, ext = os.path.splitext(filename)\n if ext in [\".h5\", \".hdf\", \".hdf5\"]:\n path = os.path.join(out_dir, filename)\n for identifier, df in dfs.items():\n df.to_hdf(path, key=identifier)\n elif ext == \".xlsx\":\n for identifier, df in dfs.items():\n df.to_excel(path, sheet_name=identifier)\n else:\n for identifier, df in dfs.items():\n path = os.path.join(out_dir, f\"{basename}_{identifier}{ext}\")\n if ext in [\".pickle\", \".pkl\"]:\n df.to_pickle(path)\n elif ext == \".csv\":\n df.to_csv(path)\n elif ext == \".tsv\":\n df.to_csv(path, sep=\"\\t\")\n else:\n raise ValueError(f\"{filename} has an unsupported extension.\")\n
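A sketch of saving two DataFrames into a single HDF5 file; paths are illustrative, and pandas needs the optional pytables dependency for HDF5. Note that in the source above, the xlsx branch references path before assigning it, so the HDF5, csv, tsv or pickle formats are the safer choices:

```python
import pandas as pd

from cuisto import io

dfs = {
    "df_regions": pd.DataFrame({"Name": ["GRN"], "count": [120]}),
    "df_coordinates": pd.DataFrame({"Atlas_X": [-5.2]}),
}

# the extension selects the format: one HDF5 file, one key per DataFrame
io.save_dfs("/path/to/output", "results.h5", dfs)
```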
"},{"location":"api-process.html","title":"cuisto.process","text":"process module, part of cuisto.
Wraps other functions for a click&play behaviour. Relies on the configuration file.
"},{"location":"api-process.html#cuisto.process.process_animal","title":"process_animal(animal, df_annotations, df_detections, cfg, compute_distributions=True)
","text":"Quantify objects for one animal.
Fetch required files and compute objects' distributions in brain regions, spatial distributions and gather Atlas coordinates.
Parameters:
Name Type Description Defaultanimal
str
Animal ID.
requireddf_annotations
DataFrame
DataFrames of QuPath Annotations and Detections.
requireddf_detections
DataFrame
DataFrames of QuPath Annotations and Detections.
requiredcfg
Config
The configuration loaded from TOML configuration file.
requiredcompute_distributions
bool
If False, do not compute the 1D distributions and return an empty list. Default is True.
True
Returns:
Name Type Descriptiondf_regions
DataFrame
Metrics in brain regions. One entry for each hemisphere of each brain regions.
df_distribution
list of pandas.DataFrame
Rostro-caudal distribution, as raw count and probability density function, in each axis.
df_coordinates
DataFrame
Atlas coordinates of each points.
Source code incuisto/process.py
def process_animal(\n animal: str,\n df_annotations: pd.DataFrame,\n df_detections: pd.DataFrame,\n cfg,\n compute_distributions: bool = True,\n) -> tuple[pd.DataFrame, list[pd.DataFrame], pd.DataFrame]:\n \"\"\"\n Quantify objects for one animal.\n\n Fetch required files and compute objects' distributions in brain regions, spatial\n distributions and gather Atlas coordinates.\n\n Parameters\n ----------\n animal : str\n Animal ID.\n df_annotations, df_detections : pd.DataFrame\n DataFrames of QuPath Annotations and Detections.\n cfg : cuisto.Config\n The configuration loaded from TOML configuration file.\n compute_distributions : bool, optional\n If False, do not compute the 1D distributions and return an empty list.Default\n is True.\n\n Returns\n -------\n df_regions : pandas.DataFrame\n Metrics in brain regions. One entry for each hemisphere of each brain regions.\n df_distribution : list of pandas.DataFrame\n Rostro-caudal distribution, as raw count and probability density function, in\n each axis.\n df_coordinates : pandas.DataFrame\n Atlas coordinates of each points.\n\n \"\"\"\n # - Annotations data cleanup\n # filter regions\n df_annotations = utils.filter_df_regions(\n df_annotations, [\"Root\", \"root\"], mode=\"remove\", col=\"Name\"\n )\n df_annotations = utils.filter_df_regions(\n df_annotations, cfg.atlas[\"blacklist\"], mode=\"remove\", col=\"Name\"\n )\n # add hemisphere\n df_annotations = utils.add_hemisphere(df_annotations, cfg.hemispheres[\"names\"])\n # remove objects in non-leaf regions\n df_annotations = utils.filter_df_regions(\n df_annotations, cfg.atlas[\"leaveslist\"], mode=\"keep\", col=\"Name\"\n )\n # merge regions\n df_annotations = utils.merge_regions(\n df_annotations, col=\"Name\", fusion_file=cfg.files[\"fusion\"]\n )\n if compute_distributions:\n # - Detections data cleanup\n # remove objects not in selected classifications\n df_detections = utils.filter_df_classifications(\n df_detections, cfg.object_type, mode=\"keep\", col=\"Classification\"\n )\n # remove objects from blacklisted regions and \"Root\"\n df_detections = utils.filter_df_regions(\n df_detections, cfg.atlas[\"blacklist\"], mode=\"remove\", col=\"Parent\"\n )\n # add hemisphere\n df_detections = utils.add_hemisphere(\n df_detections,\n cfg.hemispheres[\"names\"],\n cfg.atlas[\"midline\"],\n col=\"Atlas_Z\",\n atlas_type=cfg.atlas[\"type\"],\n )\n # add detection channel\n df_detections = utils.add_channel(\n df_detections, cfg.object_type, cfg.channels[\"names\"]\n )\n # convert coordinates to mm\n df_detections[[\"Atlas_X\", \"Atlas_Y\", \"Atlas_Z\"]] = df_detections[\n [\"Atlas_X\", \"Atlas_Y\", \"Atlas_Z\"]\n ].divide(1000)\n # convert to sterotaxic coordinates\n if cfg.distributions[\"stereo\"]:\n (\n df_detections[\"Atlas_AP\"],\n df_detections[\"Atlas_DV\"],\n df_detections[\"Atlas_ML\"],\n ) = utils.ccf_to_stereo(\n df_detections[\"Atlas_X\"],\n df_detections[\"Atlas_Y\"],\n df_detections[\"Atlas_Z\"],\n )\n else:\n (\n df_detections[\"Atlas_AP\"],\n df_detections[\"Atlas_DV\"],\n df_detections[\"Atlas_ML\"],\n ) = (\n df_detections[\"Atlas_X\"],\n df_detections[\"Atlas_Y\"],\n df_detections[\"Atlas_Z\"],\n )\n\n # - Computations\n # get regions distributions\n df_regions = compute.get_regions_metrics(\n df_annotations,\n cfg.object_type,\n cfg.channels[\"names\"],\n cfg.regions[\"base_measurement\"],\n cfg.regions[\"metrics\"],\n )\n colstonorm = [v for v in cfg.regions[\"metrics\"].values() if \"relative\" not in v]\n\n # normalize by starter cells\n if 
cfg.regions[\"normalize_starter_cells\"]:\n df_regions = compute.normalize_starter_cells(\n df_regions, colstonorm, animal, cfg.files[\"infos\"], cfg.channels[\"names\"]\n )\n\n # get AP, DV, ML distributions in stereotaxic coordinates\n if compute_distributions:\n dfs_distributions = [\n compute.get_distribution(\n df_detections,\n axis,\n cfg.distributions[\"hue\"],\n cfg.distributions[\"hue_filter\"],\n cfg.distributions[\"common_norm\"],\n stereo_lim,\n nbins=nbins,\n )\n for axis, stereo_lim, nbins in zip(\n [\"Atlas_AP\", \"Atlas_DV\", \"Atlas_ML\"],\n [\n cfg.distributions[\"ap_lim\"],\n cfg.distributions[\"dv_lim\"],\n cfg.distributions[\"ml_lim\"],\n ],\n [\n cfg.distributions[\"ap_nbins\"],\n cfg.distributions[\"dv_nbins\"],\n cfg.distributions[\"dv_nbins\"],\n ],\n )\n ]\n else:\n dfs_distributions = []\n\n # add animal tag to each DataFrame\n df_detections[\"animal\"] = animal\n df_regions[\"animal\"] = animal\n for df in dfs_distributions:\n df[\"animal\"] = animal\n\n return df_regions, dfs_distributions, df_detections\n
"},{"location":"api-process.html#cuisto.process.process_animals","title":"process_animals(wdir, animals, cfg, out_fmt=None, compute_distributions=True)
","text":"Get data from all animals and plot.
Parameters:
Name Type Description Defaultwdir
str
Base working directory, containing animals
folders.
animals
list-like of str
List of animals ID.
requiredcfg
Configuration object.
requiredout_fmt
(None, h5, csv, tsv, xslx, pickle)
Output file(s) format, if None, nothing is saved (default).
None
compute_distributions
bool
If False, do not compute the 1D distributions and return an empty list. Default is True.
True
Returns:
Name Type Descriptiondf_regions
DataFrame
Metrics in brain regions. One entry for each hemisphere of each brain regions.
df_distribution
list of pandas.DataFrame
Rostro-caudal distribution, as raw count and probability density function, in each axis.
df_coordinates
DataFrame
Atlas coordinates of each points.
Source code incuisto/process.py
def process_animals(\n wdir: str,\n animals: list[str] | tuple[str],\n cfg,\n out_fmt: str | None = None,\n compute_distributions: bool = True,\n) -> tuple[pd.DataFrame]:\n \"\"\"\n Get data from all animals and plot.\n\n Parameters\n ----------\n wdir : str\n Base working directory, containing `animals` folders.\n animals : list-like of str\n List of animals ID.\n cfg: cuisto.Config\n Configuration object.\n out_fmt : {None, \"h5\", \"csv\", \"tsv\", \"xslx\", \"pickle\"}\n Output file(s) format, if None, nothing is saved (default).\n compute_distributions : bool, optional\n If False, do not compute the 1D distributions and return an empty list.Default\n is True.\n\n\n Returns\n -------\n df_regions : pandas.DataFrame\n Metrics in brain regions. One entry for each hemisphere of each brain regions.\n df_distribution : list of pandas.DataFrame\n Rostro-caudal distribution, as raw count and probability density function, in\n each axis.\n df_coordinates : pandas.DataFrame\n Atlas coordinates of each points.\n\n \"\"\"\n\n # -- Preparation\n df_regions = []\n dfs_distributions = []\n df_coordinates = []\n\n # -- Processing\n pbar = tqdm(animals)\n\n for animal in pbar:\n pbar.set_description(f\"Processing {animal}\")\n\n # combine all detections and annotations from this animal\n df_annotations = io.cat_csv_dir(\n io.get_measurements_directory(\n wdir, animal, \"annotation\", cfg.segmentation_tag\n ),\n index_col=\"Object ID\",\n sep=\"\\t\",\n )\n if compute_distributions:\n df_detections = io.cat_data_dir(\n io.get_measurements_directory(\n wdir, animal, \"detection\", cfg.segmentation_tag\n ),\n cfg.segmentation_tag,\n index_col=\"Object ID\",\n sep=\"\\t\",\n hemisphere_names=cfg.hemispheres[\"names\"],\n atlas=cfg.bg_atlas,\n )\n else:\n df_detections = pd.DataFrame()\n\n # get results\n df_reg, dfs_dis, df_coo = process_animal(\n animal,\n df_annotations,\n df_detections,\n cfg,\n compute_distributions=compute_distributions,\n )\n\n # collect results\n df_regions.append(df_reg)\n dfs_distributions.append(dfs_dis)\n df_coordinates.append(df_coo)\n\n # concatenate all results\n df_regions = pd.concat(df_regions, ignore_index=True)\n dfs_distributions = [\n pd.concat(dfs_list, ignore_index=True) for dfs_list in zip(*dfs_distributions)\n ]\n df_coordinates = pd.concat(df_coordinates, ignore_index=True)\n\n # -- Saving\n if out_fmt:\n outdir = os.path.join(wdir, \"quantification\")\n outfile = f\"{cfg.object_type.lower()}_{cfg.atlas[\"type\"]}_{'-'.join(animals)}.{out_fmt}\"\n dfs = dict(\n df_regions=df_regions,\n df_coordinates=df_coordinates,\n df_distribution_ap=dfs_distributions[0],\n df_distribution_dv=dfs_distributions[1],\n df_distribution_ml=dfs_distributions[2],\n )\n io.save_dfs(outdir, outfile, dfs)\n\n return df_regions, dfs_distributions, df_coordinates\n
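End to end, a typical session chains the configuration, processing and plotting steps; paths and animal IDs below are illustrative:

```python
from cuisto import config, display, process

cfg = config.Config("/path/to/config.toml")

df_regions, dfs_distributions, df_coordinates = process.process_animals(
    "/path/to/wdir",             # contains one folder per animal
    ["animalid0", "animalid1"],
    cfg,
    out_fmt="h5",                # also saves results under wdir/quantification
)

figs = display.plot_regions(df_regions, cfg)
```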
"},{"location":"api-script-qupath-script-runner.html","title":"qupath_script_runner","text":"Template to show how to run groovy script with QuPath, multi-threaded.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.EXCLUDE_LIST","title":"EXCLUDE_LIST = []
module-attribute
","text":"Images names to NOT run the script on.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.NTHREADS","title":"NTHREADS = 5
module-attribute
","text":"Number of threads to use.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.QPROJ_PATH","title":"QPROJ_PATH = '/path/to/qupath/project.qproj'
module-attribute
","text":"Full path to the QuPath project.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.QUIET","title":"QUIET = True
module-attribute
","text":"Use QuPath in quiet mode, eg. with minimal verbosity.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.QUPATH_EXE","title":"QUPATH_EXE = '/path/to/the/qupath/QuPath-0.5.1 (console).exe'
module-attribute
","text":"Path to the QuPath executable (console mode).
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.SAVE","title":"SAVE = True
module-attribute
","text":"Whether to save the project after the script ran on an image.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.SCRIPT_PATH","title":"SCRIPT_PATH = '/path/to/the/script.groovy'
module-attribute
","text":"Path to the groovy script.
"},{"location":"api-script-segment.html","title":"segment_images","text":"Script to segment objects from images.
For fiber-like objects, binarize and skeletonize the image, then use skan
to extract branch coordinates. For polygon-like objects, binarize the image, detect objects and extract contour coordinates. For points, treat them as polygons, then extract the centroids instead of contours. Finally, export the coordinates as collections in geojson files, importable in QuPath. Supports any number of channels of interest within the same image. One output file per channel will be created.
This script uses cuisto.seg
. It is designed to work on probability maps generated from a pixel classifier in QuPath, but might work on raw images.
Usage : fill in the Parameters section of the script and run it. A \"geojson\" folder will be created in the parent directory of IMAGES_DIR
. To exclude objects near the edges of an ROI, specify the path to masks stored as images with the same names as probability images (without their suffix).
author : Guillaume Le Goc (g.legoc@posteo.org) @ NeuroPSI version : 2024.12.10
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.CHANNELS_PARAMS","title":"CHANNELS_PARAMS = [{'name': 'cy5', 'target_channel': 0, 'proba_threshold': 0.85, 'qp_class': 'Fibers: Cy5', 'qp_color': [164, 250, 120]}, {'name': 'dsred', 'target_channel': 1, 'proba_threshold': 0.65, 'qp_class': 'Fibers: DsRed', 'qp_color': [224, 153, 18]}, {'name': 'egfp', 'target_channel': 2, 'proba_threshold': 0.85, 'qp_class': 'Fibers: EGFP', 'qp_color': [135, 11, 191]}]
module-attribute
","text":"This should be a list of dictionary (one per channel) with keys :
EDGE_DIST = 0
module-attribute
","text":"Distance to brain edge to ignore, in \u00b5m. 0 to disable.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.FILTERS","title":"FILTERS = {'length_low': 1.5, 'area_low': 10, 'area_high': 1000, 'ecc_low': 0.0, 'ecc_high': 0.9, 'dist_thresh': 30}
module-attribute
","text":"Dictionary with keys :
IMAGES_DIR = '/path/to/images'
module-attribute
","text":"Full path to the images to segment.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.IMG_SUFFIX","title":"IMG_SUFFIX = '_Probabilities.tiff'
module-attribute
","text":"Images suffix, including extension. Masks must be the same name without the suffix.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.MASKS_DIR","title":"MASKS_DIR = 'path/to/corresponding/masks'
module-attribute
","text":"Full path to the masks, to exclude objects near the brain edges (set to None or empty string to disable this feature).
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.MASKS_EXT","title":"MASKS_EXT = 'tiff'
module-attribute
","text":"Masks files extension.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.MAX_PIX_VALUE","title":"MAX_PIX_VALUE = 255
module-attribute
","text":"Maximum pixel possible value to adjust proba_threshold
.
ORIGINAL_PIXELSIZE = 0.45
module-attribute
","text":"Original images pixel size in microns. This is in case the pixel classifier uses a lower resolution, yielding smaller probability maps, so output objects coordinates need to be rescaled to the full size images. The pixel size is written in the \"Image\" tab in QuPath.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.QUPATH_TYPE","title":"QUPATH_TYPE = 'detection'
module-attribute
","text":"QuPath object type.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.SEGTYPE","title":"SEGTYPE = 'boutons'
module-attribute
","text":"Type of segmentation.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.get_geojson_dir","title":"get_geojson_dir(images_dir)
","text":"Get the directory of geojson files, which will be in the parent directory of images_dir
.
If the directory does not exist, create it.
Parameters:
Name Type Description Defaultimages_dir
str
required Returns:
Name Type Descriptiongeojson_dir
str
Source code in scripts/segmentation/segment_images.py
def get_geojson_dir(images_dir: str):\n \"\"\"\n Get the directory of geojson files, which will be in the parent directory\n of `images_dir`.\n\n If the directory does not exist, create it.\n\n Parameters\n ----------\n images_dir : str\n\n Returns\n -------\n geojson_dir : str\n\n \"\"\"\n\n geojson_dir = os.path.join(Path(images_dir).parent, \"geojson\")\n\n if not os.path.isdir(geojson_dir):\n os.mkdir(geojson_dir)\n\n return geojson_dir\n
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.get_geojson_properties","title":"get_geojson_properties(name, color, objtype='detection')
","text":"Return geojson objects properties as a dictionnary, ready to be used in geojson.Feature.
Parameters:
Name Type Description Defaultname
str
Classification name.
requiredcolor
tuple or list
Classification color in RGB (3-elements vector).
requiredobjtype
str
Object type (\"detection\" or \"annotation\"). Default is \"detection\".
'detection'
Returns:
Name Type Descriptionprops
dict
Source code in scripts/segmentation/segment_images.py
def get_geojson_properties(name: str, color: tuple | list, objtype: str = \"detection\"):\n \"\"\"\n Return geojson objects properties as a dictionnary, ready to be used in geojson.Feature.\n\n Parameters\n ----------\n name : str\n Classification name.\n color : tuple or list\n Classification color in RGB (3-elements vector).\n objtype : str, optional\n Object type (\"detection\" or \"annotation\"). Default is \"detection\".\n\n Returns\n -------\n props : dict\n\n \"\"\"\n\n return {\n \"objectType\": objtype,\n \"classification\": {\"name\": name, \"color\": color},\n \"isLocked\": \"true\",\n }\n
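For instance, within the script, using the EGFP channel defined in `CHANNELS_PARAMS` above:

```python
props = get_geojson_properties("Fibers: EGFP", [135, 11, 191], objtype="detection")
# props == {
#     "objectType": "detection",
#     "classification": {"name": "Fibers: EGFP", "color": [135, 11, 191]},
#     "isLocked": "true",
# }
```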
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.get_seg_method","title":"get_seg_method(segtype)
","text":"Determine what kind of segmentation is performed.
Segmentation kinds are, for now, lines, polygons or points, detected based on hardcoded keywords.
Parameters:
Name Type Description Defaultsegtype
str
required Returns:
Name Type Descriptionseg_method
str
Source code in scripts/segmentation/segment_images.py
def get_seg_method(segtype: str):\n \"\"\"\n Determine what kind of segmentation is performed.\n\n Segmentation kinds are, for now, lines, polygons or points, detected based on\n hardcoded keywords.\n\n Parameters\n ----------\n segtype : str\n\n Returns\n -------\n seg_method : str\n\n \"\"\"\n\n line_list = [\"fibers\", \"axons\", \"fiber\", \"axon\"]\n point_list = [\"synapto\", \"synaptophysin\", \"syngfp\", \"boutons\", \"points\"]\n polygon_list = [\"cells\", \"polygon\", \"polygons\", \"cell\"]\n\n if segtype in line_list:\n seg_method = \"lines\"\n elif segtype in polygon_list:\n seg_method = \"polygons\"\n elif segtype in point_list:\n seg_method = \"points\"\n else:\n raise ValueError(\n f\"Could not determine method to use based on segtype : {segtype}.\"\n )\n\n return seg_method\n
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.parameters_as_dict","title":"parameters_as_dict(images_dir, masks_dir, segtype, name, proba_threshold, edge_dist)
","text":"Get information as a dictionnary.
Parameters:
Name Type Description Defaultimages_dir
str
Path to images to be segmented.
requiredmasks_dir
str
Path to images masks.
requiredsegtype
str
Segmentation type (eg. \"fibers\").
requiredname
str
Name of the segmentation (eg. \"green\").
requiredproba_threshold
float < 1
Probability threshold.
requirededge_dist
float
Distance in \u00b5m to the brain edge that is ignored.
requiredReturns:
Name Type Descriptionparams
dict
Source code in scripts/segmentation/segment_images.py
def parameters_as_dict(\n images_dir: str,\n masks_dir: str,\n segtype: str,\n name: str,\n proba_threshold: float,\n edge_dist: float,\n):\n \"\"\"\n Get information as a dictionnary.\n\n Parameters\n ----------\n images_dir : str\n Path to images to be segmented.\n masks_dir : str\n Path to images masks.\n segtype : str\n Segmentation type (eg. \"fibers\").\n name : str\n Name of the segmentation (eg. \"green\").\n proba_threshold : float < 1\n Probability threshold.\n edge_dist : float\n Distance in \u00b5m to the brain edge that is ignored.\n\n Returns\n -------\n params : dict\n\n \"\"\"\n\n return {\n \"images_location\": images_dir,\n \"masks_location\": masks_dir,\n \"type\": segtype,\n \"probability threshold\": proba_threshold,\n \"name\": name,\n \"edge distance\": edge_dist,\n }\n
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.process_directory","title":"process_directory(images_dir, img_suffix='', segtype='', original_pixelsize=1.0, target_channel=0, proba_threshold=0.0, qupath_class='Object', qupath_color=[0, 0, 0], channel_suffix='', edge_dist=0.0, filters={}, masks_dir='', masks_ext='')
","text":"Main function, processes the .ome.tiff files in the input directory.
Parameters:
Name Type Description Defaultimages_dir
str
Animal ID to process.
requiredimg_suffix
str
Images suffix, including extension.
''
segtype
str
Segmentation type.
''
original_pixelsize
float
Original images pixel size in microns.
1.0
target_channel
int
Index of the channel containning the objects of interest (eg. not the background), in the probability map (not the original images channels).
0
proba_threshold
float < 1
Probability below this value will be discarded (multiplied by MAX_PIXEL_VALUE
)
0.0
qupath_class
str
Name of the QuPath classification.
'Object'
qupath_color
list of three elements
Color associated to that classification in RGB.
[0, 0, 0]
channel_suffix
str
Channel name, will be used as a suffix in output geojson files.
''
edge_dist
float
Distance to the edge of the brain masks that will be ignored, in microns. Set to 0 to disable this feature.
0.0
filters
dict
Filter values to include or exclude objects. See the top of the script.
{}
masks_dir
str
Path to images masks, to exclude objects found near the edges. The masks must have the same name as the corresponding image to be segmented, without its suffix. Default is \"\", which disables this feature.
''
masks_ext
str
Masks files extension, without leading \".\". Default is \"\".
''
Source code in scripts/segmentation/segment_images.py
def process_directory(\n images_dir: str,\n img_suffix: str = \"\",\n segtype: str = \"\",\n original_pixelsize: float = 1.0,\n target_channel: int = 0,\n proba_threshold: float = 0.0,\n qupath_class: str = \"Object\",\n qupath_color: list = [0, 0, 0],\n channel_suffix: str = \"\",\n edge_dist: float = 0.0,\n filters: dict = {},\n masks_dir: str = \"\",\n masks_ext: str = \"\",\n):\n \"\"\"\n Main function, processes the .ome.tiff files in the input directory.\n\n Parameters\n ----------\n images_dir : str\n Path to the directory containing the images to process.\n img_suffix : str\n Images suffix, including extension.\n segtype : str\n Segmentation type.\n original_pixelsize : float\n Original images pixel size in microns.\n target_channel : int\n Index of the channel containing the objects of interest (i.e. not the\n background), in the probability map (*not* the original images channels).\n proba_threshold : float < 1\n Probabilities below this value will be discarded (the value is multiplied by\n `MAX_PIX_VALUE`).\n qupath_class : str\n Name of the QuPath classification.\n qupath_color : list of three elements\n Color associated to that classification in RGB.\n channel_suffix : str\n Channel name, will be used as a suffix in output geojson files.\n edge_dist : float\n Distance to the edge of the brain masks that will be ignored, in microns. Set to\n 0 to disable this feature.\n filters : dict\n Filter values to include or exclude objects. See the top of the script.\n masks_dir : str, optional\n Path to images masks, to exclude objects found near the edges. The masks must\n have the same name as the corresponding image to be segmented, without its\n suffix. Default is \"\", which disables this feature.\n masks_ext : str, optional\n Masks files extension, without leading \".\". Default is \"\".\n\n \"\"\"\n\n # -- Preparation\n # get segmentation type\n seg_method = get_seg_method(segtype)\n\n # get output directory path\n geojson_dir = get_geojson_dir(images_dir)\n\n # get images list\n images_list = [\n os.path.join(images_dir, filename)\n for filename in os.listdir(images_dir)\n if filename.endswith(img_suffix)\n ]\n\n # write parameters\n parameters = parameters_as_dict(\n images_dir, masks_dir, segtype, channel_suffix, proba_threshold, edge_dist\n )\n param_file = os.path.join(geojson_dir, \"parameters\" + channel_suffix + \".txt\")\n if os.path.isfile(param_file):\n raise FileExistsError(\"Parameters file already exists.\")\n else:\n write_parameters(param_file, parameters, filters, original_pixelsize)\n\n # convert parameters to pixels in probability map\n pixelsize = hq.seg.get_pixelsize(images_list[0]) # get pixel size\n edge_dist = int(edge_dist / pixelsize)\n filters = hq.seg.convert_to_pixels(filters, pixelsize)\n\n # get rescaling factor\n rescale_factor = pixelsize / original_pixelsize\n\n # get GeoJSON properties\n geojson_props = get_geojson_properties(\n qupath_class, qupath_color, objtype=QUPATH_TYPE\n )\n\n # -- Processing\n pbar = tqdm(images_list)\n for imgpath in pbar:\n # build file names\n imgname = os.path.basename(imgpath)\n geoname = imgname.replace(img_suffix, \"\")\n geojson_file = os.path.join(\n geojson_dir, geoname + \"_segmentation\" + channel_suffix + \".geojson\"\n )\n\n # checks if output file already exists\n if os.path.isfile(geojson_file):\n continue\n\n # read images\n pbar.set_description(f\"{geoname}: Loading...\")\n img = tifffile.imread(imgpath, key=target_channel)\n if (edge_dist > 0) & (len(masks_dir) != 0):\n mask = tifffile.imread(os.path.join(masks_dir, geoname + \".\" + masks_ext))\n mask = hq.seg.pad_image(mask, img.shape) # resize mask\n # apply mask, eroding from the edges\n img = img * hq.seg.erode_mask(mask, edge_dist)\n\n # image processing\n pbar.set_description(f\"{geoname}: IP...\")\n\n # threshold probability and binarization\n img = img >= proba_threshold * MAX_PIX_VALUE\n\n # segmentation\n pbar.set_description(f\"{geoname}: Segmenting...\")\n\n if seg_method == \"lines\":\n collection = hq.seg.segment_lines(\n img,\n geojson_props,\n minsize=filters[\"length_low\"],\n rescale_factor=rescale_factor,\n )\n\n elif seg_method == \"polygons\":\n collection = hq.seg.segment_polygons(\n img,\n geojson_props,\n area_min=filters[\"area_low\"],\n area_max=filters[\"area_high\"],\n ecc_min=filters[\"ecc_low\"],\n ecc_max=filters[\"ecc_high\"],\n rescale_factor=rescale_factor,\n )\n\n elif seg_method == \"points\":\n collection = hq.seg.segment_points(\n img,\n geojson_props,\n area_min=filters[\"area_low\"],\n area_max=filters[\"area_high\"],\n ecc_min=filters[\"ecc_low\"],\n ecc_max=filters[\"ecc_high\"],\n dist_thresh=filters[\"dist_thresh\"],\n rescale_factor=rescale_factor,\n )\n else:\n # not reached: get_seg_method() raises for unknown types\n return\n\n # save geojson\n pbar.set_description(f\"{geoname}: Saving...\")\n with open(geojson_file, \"w\") as fid:\n fid.write(geojson.dumps(collection))\n
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.write_parameters","title":"write_parameters(outfile, parameters, filters, original_pixelsize)
","text":"Write parameters to outfile
.
A timestamp will be added. Parameters are written as key = value, and a [filters] section header is added before the filter parameters.
Parameters:
Name Type Description Defaultoutfile
str
Full path to the output file.
requiredparameters
dict
General parameters.
requiredfilters
dict
Filters parameters.
requiredoriginal_pixelsize
float
Size of pixels in original image.
required Source code inscripts/segmentation/segment_images.py
def write_parameters(\n outfile: str, parameters: dict, filters: dict, original_pixelsize: float\n):\n \"\"\"\n Write parameters to `outfile`.\n\n A timestamp will be added. Parameters are written as key = value,\n and a [filters] is added before filters parameters.\n\n Parameters\n ----------\n outfile : str\n Full path to the output file.\n parameters : dict\n General parameters.\n filters : dict\n Filters parameters.\n original_pixelsize : float\n Size of pixels in original image.\n\n \"\"\"\n\n with open(outfile, \"w\") as fid:\n fid.writelines(f\"date = {datetime.now().strftime('%d-%B-%Y %H:%M:%S')}\\n\")\n\n fid.writelines(f\"original_pixelsize = {original_pixelsize}\\n\")\n\n for key, value in parameters.items():\n fid.writelines(f\"{key} = {value}\\n\")\n\n fid.writelines(\"[filters]\\n\")\n\n for key, value in filters.items():\n fid.writelines(f\"{key} = {value}\\n\")\n
"},{"location":"api-seg.html","title":"cuisto.seg","text":"seg module, part of cuisto.
Functions for segmenting probability maps stored as images.
"},{"location":"api-seg.html#cuisto.seg.convert_to_pixels","title":"convert_to_pixels(filters, pixelsize)
","text":"Convert some values in filters
to pixels.
Parameters:
Name Type Description Defaultfilters
dict
Must contain the keys used below.
requiredpixelsize
float
Pixel size in microns.
requiredReturns:
Name Type Descriptionfilters
dict
Same as input, with values in pixels.
Source code incuisto/seg.py
def convert_to_pixels(filters, pixelsize):\n \"\"\"\n Convert some values in `filters` in pixels.\n\n Parameters\n ----------\n filters : dict\n Must contain the keys used below.\n pixelsize : float\n Pixel size in microns.\n\n Returns\n -------\n filters : dict\n Same as input, with values in pixels.\n\n \"\"\"\n\n filters[\"area_low\"] = filters[\"area_low\"] / pixelsize**2\n filters[\"area_high\"] = filters[\"area_high\"] / pixelsize**2\n filters[\"length_low\"] = filters[\"length_low\"] / pixelsize\n filters[\"dist_thresh\"] = int(filters[\"dist_thresh\"] / pixelsize)\n\n return filters\n
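A quick worked example with the default `FILTERS` values and a 0.45 µm pixel size: areas are divided by the squared pixel size, lengths and distances by the pixel size, and eccentricities are dimensionless so they are left untouched.

```python
from cuisto import seg

filters = {"length_low": 1.5, "area_low": 10, "area_high": 1000,
           "ecc_low": 0.0, "ecc_high": 0.9, "dist_thresh": 30}
filters = seg.convert_to_pixels(filters, 0.45)
# area_low: 10 / 0.45**2 ≈ 49.4 px², length_low: 1.5 / 0.45 ≈ 3.33 px,
# dist_thresh: int(30 / 0.45) = 66 px
```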
"},{"location":"api-seg.html#cuisto.seg.erode_mask","title":"erode_mask(mask, edge_dist)
","text":"Erode the mask outline so that is is edge_dist
smaller from the border.
This allows discarding the edges.
Parameters:
Name Type Description Defaultmask
ndarray
required edge_dist
float
Distance to edges, in pixels.
requiredReturns:
Name Type Descriptioneroded_mask
ndarray of bool
Source code in cuisto/seg.py
def erode_mask(mask: np.ndarray, edge_dist: float) -> np.ndarray:\n \"\"\"\n Erode the mask outline so that it is `edge_dist` smaller from the border.\n\n This allows discarding the edges.\n\n Parameters\n ----------\n mask : ndarray\n edge_dist : float\n Distance to edges, in pixels.\n\n Returns\n -------\n eroded_mask : ndarray of bool\n\n \"\"\"\n\n if edge_dist % 2 == 0:\n edge_dist += 1 # the sequence decomposition requires an odd size\n\n footprint = morphology.square(edge_dist, decomposition=\"sequence\")\n\n return mask * morphology.binary_erosion(mask, footprint=footprint)\n
"},{"location":"api-seg.html#cuisto.seg.get_collection_from_points","title":"get_collection_from_points(coords, properties, rescale_factor=1.0, offset=0.5)
","text":"Gather coordinates from coords
and put them in GeoJSON format.
An entry in coords
is a pair of (x, y) coordinates defining the point. properties
is a dictionary with QuPath properties of each detection.
Parameters:
Name Type Description Defaultcoords
list
required properties
dict
required rescale_factor
float
Rescale output coordinates by this factor.
1.0
Returns:
Name Type Descriptioncollection
FeatureCollection
Source code in cuisto/seg.py
def get_collection_from_points(\n coords: list, properties: dict, rescale_factor: float = 1.0, offset: float = 0.5\n) -> geojson.FeatureCollection:\n \"\"\"\n Gather coordinates from `coords` and put them in GeoJSON format.\n\n An entry in `coords` are pairs of (x, y) coordinates defining the point.\n `properties` is a dictionnary with QuPath properties of each detections.\n\n Parameters\n ----------\n coords : list\n properties : dict\n rescale_factor : float\n Rescale output coordinates by this factor.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n\n \"\"\"\n\n collection = [\n geojson.Feature(\n geometry=shapely.Point(\n np.flip((coord + offset) * rescale_factor)\n ), # shape object\n properties=properties, # object properties\n id=str(uuid.uuid4()), # object uuid\n )\n for coord in coords\n ]\n\n return geojson.FeatureCollection(collection)\n
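A minimal sketch with made-up coordinates. Note that each entry is flipped on output, so inputs in (row, column) order (as produced by `np.argwhere`) become (x, y) points:

```python
import numpy as np
from cuisto import seg

coords = [np.array([10, 20]), np.array([30, 40])]  # (row, column) pairs
props = {"objectType": "detection"}
collection = seg.get_collection_from_points(coords, props, rescale_factor=2.0)
# first Point: ((20 + 0.5) * 2, (10 + 0.5) * 2) = (41.0, 21.0)
```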
"},{"location":"api-seg.html#cuisto.seg.get_collection_from_poly","title":"get_collection_from_poly(contours, properties, rescale_factor=1.0, offset=0.5)
","text":"Gather coordinates in the list and put them in GeoJSON format as Polygons.
An entry in contours
must define a closed polygon. properties
is a dictionary with QuPath properties of each detection.
Parameters:
Name Type Description Defaultcontours
list
required properties
dict
QuPatj objects' properties.
requiredrescale_factor
float
Rescale output coordinates by this factor.
1.0
offset
float
Shift coordinates by this amount, typically to get pixel centers or edges. Default is 0.5.
0.5
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def get_collection_from_poly(\n contours: list, properties: dict, rescale_factor: float = 1.0, offset: float = 0.5\n) -> geojson.FeatureCollection:\n \"\"\"\n Gather coordinates in the list and put them in GeoJSON format as Polygons.\n\n An entry in `contours` must define a closed polygon. `properties` is a dictionnary\n with QuPath properties of each detections.\n\n Parameters\n ----------\n contours : list\n properties : dict\n QuPatj objects' properties.\n rescale_factor : float\n Rescale output coordinates by this factor.\n offset : float\n Shift coordinates by this amount, typically to get pixel centers or edges.\n Default is 0.5.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n collection = [\n geojson.Feature(\n geometry=shapely.Polygon(\n np.fliplr((contour + offset) * rescale_factor)\n ), # shape object\n properties=properties, # object properties\n id=str(uuid.uuid4()), # object uuid\n )\n for contour in contours\n ]\n\n return geojson.FeatureCollection(collection)\n
"},{"location":"api-seg.html#cuisto.seg.get_collection_from_skel","title":"get_collection_from_skel(skeleton, properties, rescale_factor=1.0, offset=0.5)
","text":"Get the coordinates of each skeleton path as a GeoJSON Features in a FeatureCollection. properties
is a dictionary with QuPath properties of each detection.
Parameters:
Name Type Description Defaultskeleton
Skeleton
required properties
dict
QuPatj objects' properties.
requiredrescale_factor
float
Rescale output coordinates by this factor.
1.0
offset
float
Shift coordinates by this amount, typically to get pixel centers or edges. Default is 0.5.
0.5
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def get_collection_from_skel(\n skeleton: Skeleton, properties: dict, rescale_factor: float = 1.0, offset=0.5\n) -> geojson.FeatureCollection:\n \"\"\"\n Get the coordinates of each skeleton path as a GeoJSON Features in a\n FeatureCollection.\n `properties` is a dictionnary with QuPath properties of each detections.\n\n Parameters\n ----------\n skeleton : skan.Skeleton\n properties : dict\n QuPatj objects' properties.\n rescale_factor : float\n Rescale output coordinates by this factor.\n offset : float\n Shift coordinates by this amount, typically to get pixel centers or edges.\n Default is 0.5.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n\n branch_data = summarize(skeleton, separator=\"_\")\n\n collection = []\n for ind in range(skeleton.n_paths):\n prop = properties.copy()\n prop[\"measurements\"] = {\"skeleton_id\": int(branch_data.loc[ind, \"skeleton_id\"])}\n collection.append(\n geojson.Feature(\n geometry=shapely.LineString(\n (skeleton.path_coordinates(ind)[:, ::-1] + offset) * rescale_factor\n ), # shape object\n properties=prop, # object properties\n id=str(uuid.uuid4()), # object uuid\n )\n )\n\n return geojson.FeatureCollection(collection)\n
"},{"location":"api-seg.html#cuisto.seg.get_image_skeleton","title":"get_image_skeleton(img, minsize=0)
","text":"Get the image skeleton.
Computes the image skeleton and removes objects smaller than minsize
.
Parameters:
Name Type Description Defaultimg
ndarray of bool
required minsize
number
Min. size the object can have, as a number of pixels. Default is 0.
0
Returns:
Name Type Descriptionskel
ndarray of bool
Binary image with 1-pixel wide skeleton.
Source code incuisto/seg.py
def get_image_skeleton(img: np.ndarray, minsize=0) -> np.ndarray:\n \"\"\"\n Get the image skeleton.\n\n Computes the image skeleton and removes objects smaller than `minsize`.\n\n Parameters\n ----------\n img : ndarray of bool\n minsize : number, optional\n Min. size the object can have, as a number of pixels. Default is 0.\n\n Returns\n -------\n skel : ndarray of bool\n Binary image with 1-pixel wide skeleton.\n\n \"\"\"\n\n skel = morphology.skeletonize(img)\n\n return morphology.remove_small_objects(skel, min_size=minsize, connectivity=2)\n
"},{"location":"api-seg.html#cuisto.seg.get_pixelsize","title":"get_pixelsize(image_name)
","text":"Get pixel size recorded in image_name
TIFF metadata.
Parameters:
Name Type Description Defaultimage_name
str
Full path to image.
requiredReturns:
Name Type Descriptionpixelsize
float
Pixel size in microns.
Source code incuisto/seg.py
def get_pixelsize(image_name: str) -> float:\n \"\"\"\n Get pixel size recorded in `image_name` TIFF metadata.\n\n Parameters\n ----------\n image_name : str\n Full path to image.\n\n Returns\n -------\n pixelsize : float\n Pixel size in microns.\n\n \"\"\"\n\n with tifffile.TiffFile(image_name) as tif:\n # XResolution is a (numerator, denominator) tuple; its inverse is the pixel size\n return (\n tif.pages[0].tags[\"XResolution\"].value[1]\n / tif.pages[0].tags[\"XResolution\"].value[0]\n )\n
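This is typically used to compute the rescaling factor applied to output coordinates, as done in `process_directory` (the path below is a placeholder and 0.45 stands for the script's `ORIGINAL_PIXELSIZE`):

```python
from cuisto import seg

pixelsize = seg.get_pixelsize("/path/to/images/image_Probabilities.tiff")
rescale_factor = pixelsize / 0.45  # probability-map pixel size / original pixel size
```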
"},{"location":"api-seg.html#cuisto.seg.pad_image","title":"pad_image(img, finalsize)
","text":"Pad image with zeroes to match expected final size.
Parameters:
Name Type Description Defaultimg
ndarray
required finalsize
tuple or list
nrows, ncolumns
requiredReturns:
Name Type Descriptionimgpad
ndarray
img with black borders.
Source code incuisto/seg.py
def pad_image(img: np.ndarray, finalsize: tuple | list) -> np.ndarray:\n \"\"\"\n Pad image with zeroes to match expected final size.\n\n Parameters\n ----------\n img : ndarray\n finalsize : tuple or list\n nrows, ncolumns\n\n Returns\n -------\n imgpad : ndarray\n img with black borders.\n\n \"\"\"\n\n final_h = finalsize[0] # requested number of rows (height)\n final_w = finalsize[1] # requested number of columns (width)\n original_h = img.shape[0] # input number of rows\n original_w = img.shape[1] # input number of columns\n\n a = (final_h - original_h) // 2 # vertical padding before\n aa = final_h - a - original_h # vertical padding after\n b = (final_w - original_w) // 2 # horizontal padding before\n bb = final_w - b - original_w # horizontal padding after\n\n return np.pad(img, pad_width=((a, aa), (b, bb)), mode=\"constant\")\n
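For example, padding a 2×2 image to 4×4 centers it within one row and one column of zeros on each side:

```python
import numpy as np
from cuisto import seg

img = np.ones((2, 2), dtype=bool)
padded = seg.pad_image(img, (4, 4))
# padded.shape == (4, 4) and the original image sits in padded[1:3, 1:3]
```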
"},{"location":"api-seg.html#cuisto.seg.segment_lines","title":"segment_lines(img, geojson_props, minsize=0.0, rescale_factor=1.0)
","text":"Wraps skeleton analysis to get paths coordinates.
Parameters:
Name Type Description Defaultimg
ndarray of bool
Binary image to segment as lines.
requiredgeojson_props
dict
GeoJSON properties of objects.
requiredminsize
float
Minimum size in pixels for an object.
0.0
rescale_factor
float
Rescale output coordinates by this factor.
1.0
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def segment_lines(\n img: np.ndarray, geojson_props: dict, minsize=0.0, rescale_factor=1.0\n) -> geojson.FeatureCollection:\n \"\"\"\n Wraps skeleton analysis to get paths coordinates.\n\n Parameters\n ----------\n img : ndarray of bool\n Binary image to segment as lines.\n geojson_props : dict\n GeoJSON properties of objects.\n minsize : float\n Minimum size in pixels for an object.\n rescale_factor : float\n Rescale output coordinates by this factor.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n\n skel = get_image_skeleton(img, minsize=minsize)\n\n # get paths coordinates as FeatureCollection\n skeleton = Skeleton(skel, keep_images=False)\n return get_collection_from_skel(\n skeleton, geojson_props, rescale_factor=rescale_factor\n )\n
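Putting the pieces together, a hedged end-to-end sketch for fiber-like objects, mirroring what `process_directory` does (threshold and classification values are taken from the script defaults; file paths are placeholders and `minsize` is assumed to be already converted to pixels):

```python
import geojson
import tifffile
from cuisto import seg

# read the probability map (channel 0) and binarize it
img = tifffile.imread("/path/to/images/image_Probabilities.tiff", key=0)
binary = img >= 0.85 * 255  # proba_threshold * MAX_PIX_VALUE

props = {
    "objectType": "detection",
    "classification": {"name": "Fibers: EGFP", "color": [135, 11, 191]},
    "isLocked": "true",
}
collection = seg.segment_lines(binary, props, minsize=3, rescale_factor=1.0)

with open("/path/to/geojson/image_segmentation_egfp.geojson", "w") as fid:
    fid.write(geojson.dumps(collection))
```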
"},{"location":"api-seg.html#cuisto.seg.segment_points","title":"segment_points(img, geojson_props, area_min=0.0, area_max=np.inf, ecc_min=0, ecc_max=1, dist_thresh=0, rescale_factor=1)
","text":"Point segmentation.
First, segment polygons to apply shape filters, then extract their centroids, and remove isolated points as defined by dist_thresh
.
Parameters:
Name Type Description Defaultimg
ndarray of bool
Binary image to segment as points.
requiredgeojson_props
dict
GeoJSON properties of objects.
requiredarea_min
float
Minimum and maximum area in pixels for an object.
0.0
area_max
float
Minimum and maximum area in pixels for an object.
0.0
ecc_min
float
Minimum and maximum eccentricity for an object.
0
ecc_max
float
Minimum and maximum eccentricity for an object.
0
dist_thresh
float
Maximal distance in pixels between objects before considering them as isolated and remove them. 0 disables it.
0
rescale_factor
float
Rescale output coordinates by this factor.
1
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def segment_points(\n img: np.ndarray,\n geojson_props: dict,\n area_min: float = 0.0,\n area_max: float = np.inf,\n ecc_min: float = 0,\n ecc_max: float = 1,\n dist_thresh: float = 0,\n rescale_factor: float = 1,\n) -> geojson.FeatureCollection:\n \"\"\"\n Point segmentation.\n\n First, segment polygons to apply shape filters, then extract their centroids,\n and remove isolated points as defined by `dist_thresh`.\n\n Parameters\n ----------\n img : ndarray of bool\n Binary image to segment as points.\n geojson_props : dict\n GeoJSON properties of objects.\n area_min, area_max : float\n Minimum and maximum area in pixels for an object.\n ecc_min, ecc_max : float\n Minimum and maximum eccentricity for an object.\n dist_thresh : float\n Maximal distance in pixels between objects before considering them as isolated and remove them.\n 0 disables it.\n rescale_factor : float\n Rescale output coordinates by this factor.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n\n # get objects properties\n stats = pd.DataFrame(\n measure.regionprops_table(\n measure.label(img), properties=(\"label\", \"area\", \"eccentricity\", \"centroid\")\n )\n )\n\n # keep objects matching filters\n stats = stats[\n (stats[\"area\"] >= area_min)\n & (stats[\"area\"] <= area_max)\n & (stats[\"eccentricity\"] >= ecc_min)\n & (stats[\"eccentricity\"] <= ecc_max)\n ]\n\n # create an image from centroids only\n stats[\"centroid-0\"] = stats[\"centroid-0\"].astype(int)\n stats[\"centroid-1\"] = stats[\"centroid-1\"].astype(int)\n bw = np.zeros(img.shape, dtype=bool)\n bw[stats[\"centroid-0\"], stats[\"centroid-1\"]] = True\n\n # filter isolated objects\n if dist_thresh:\n # dilation of points\n if dist_thresh % 2 == 0:\n dist_thresh += 1 # decomposition requires even number\n\n footprint = morphology.square(int(dist_thresh), decomposition=\"sequence\")\n dilated = measure.label(morphology.binary_dilation(bw, footprint=footprint))\n stats = pd.DataFrame(\n measure.regionprops_table(dilated, properties=(\"label\", \"area\"))\n )\n\n # objects that did not merge are alone\n toremove = stats[(stats[\"area\"] <= dist_thresh**2)]\n dilated[np.isin(dilated, toremove[\"label\"])] = 0 # remove them\n\n # apply mask\n bw = bw * dilated\n\n # get points coordinates\n coords = np.argwhere(bw)\n\n return get_collection_from_points(\n coords, geojson_props, rescale_factor=rescale_factor\n )\n
"},{"location":"api-seg.html#cuisto.seg.segment_polygons","title":"segment_polygons(img, geojson_props, area_min=0.0, area_max=np.inf, ecc_min=0.0, ecc_max=1.0, rescale_factor=1.0)
","text":"Polygon segmentation.
Parameters:
Name Type Description Defaultimg
ndarray of bool
Binary image to segment as polygons.
requiredgeojson_props
dict
GeoJSON properties of objects.
requiredarea_min
float
Minimum and maximum area in pixels for an object.
0.0
area_max
float
Minimum and maximum area in pixels for an object.
0.0
ecc_min
float
Minimum and maximum eccentricity for an object.
0.0
ecc_max
float
Minimum and maximum eccentricity for an object.
0.0
rescale_factor
float
Rescale output coordinates by this factor.
1.0
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def segment_polygons(\n img: np.ndarray,\n geojson_props: dict,\n area_min: float = 0.0,\n area_max: float = np.inf,\n ecc_min: float = 0.0,\n ecc_max: float = 1.0,\n rescale_factor: float = 1.0,\n) -> geojson.FeatureCollection:\n \"\"\"\n Polygon segmentation.\n\n Parameters\n ----------\n img : ndarray of bool\n Binary image to segment as polygons.\n geojson_props : dict\n GeoJSON properties of objects.\n area_min, area_max : float\n Minimum and maximum area in pixels for an object.\n ecc_min, ecc_max : float\n Minimum and maximum eccentricity for an object.\n rescale_factor: float\n Rescale output coordinates by this factor.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n\n label_image = measure.label(img)\n\n # get objects properties\n stats = pd.DataFrame(\n measure.regionprops_table(\n label_image, properties=(\"label\", \"area\", \"eccentricity\")\n )\n )\n\n # remove objects not matching filters\n toremove = stats[\n (stats[\"area\"] < area_min)\n | (stats[\"area\"] > area_max)\n | (stats[\"eccentricity\"] < ecc_min)\n | (stats[\"eccentricity\"] > ecc_max)\n ]\n\n label_image[np.isin(label_image, toremove[\"label\"])] = 0\n\n # find objects countours\n label_image = label_image > 0\n contours = measure.find_contours(label_image)\n\n return get_collection_from_poly(\n contours, geojson_props, rescale_factor=rescale_factor\n )\n
"},{"location":"api-utils.html","title":"cuisto.utils","text":"utils module, part of cuisto.
Contains utility functions.
"},{"location":"api-utils.html#cuisto.utils.add_brain_region","title":"add_brain_region(df, atlas, col='Parent')
","text":"Add brain region to a DataFrame with Atlas_X
, Atlas_Y
and Atlas_Z
columns.
This uses the Brainglobe Atlas API to query the atlas. It does not use the structure_from_coords() method; instead, it manually converts the coordinates into stack indices, then gets the corresponding annotation id and queries the corresponding acronym -- because brainglobe-atlasapi is not vectorized at all.
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame with atlas coordinates in microns.
requiredatlas
BrainGlobeAtlas
required col
str
Column in which to put the regions acronyms. Default is \"Parent\".
'Parent'
Returns:
Name Type Descriptiondf
DataFrame
Same DataFrame with a new \"Parent\" column.
Source code incuisto/utils.py
def add_brain_region(\n df: pd.DataFrame, atlas: BrainGlobeAtlas, col=\"Parent\"\n) -> pd.DataFrame:\n \"\"\"\n Add brain region to a DataFrame with `Atlas_X`, `Atlas_Y` and `Atlas_Z` columns.\n\n This uses Brainglobe Atlas API to query the atlas. It does not use the\n structure_from_coords() method, instead it manually converts the coordinates in\n stack indices, then get the corresponding annotation id and query the corresponding\n acronym -- because brainglobe-atlasapi is not vectorized at all.\n\n Parameters\n ----------\n df : pd.DataFrame\n DataFrame with atlas coordinates in microns.\n atlas : BrainGlobeAtlas\n col : str, optional\n Column in which to put the regions acronyms. Default is \"Parent\".\n\n Returns\n -------\n df : pd.DataFrame\n Same DataFrame with a new \"Parent\" column.\n\n \"\"\"\n df_in = df.copy()\n\n res = atlas.resolution # microns <-> pixels conversion\n lims = atlas.shape_um # out of brain\n\n # set out-of-brain objects at 0 so we get \"root\" as their parent\n df_in.loc[(df_in[\"Atlas_X\"] >= lims[0]) | (df_in[\"Atlas_X\"] < 0), \"Atlas_X\"] = 0\n df_in.loc[(df_in[\"Atlas_Y\"] >= lims[1]) | (df_in[\"Atlas_Y\"] < 0), \"Atlas_Y\"] = 0\n df_in.loc[(df_in[\"Atlas_Z\"] >= lims[2]) | (df_in[\"Atlas_Z\"] < 0), \"Atlas_Z\"] = 0\n\n # build the multi index, in pixels and integers\n ixyz = (\n df_in[\"Atlas_X\"].divide(res[0]).astype(int),\n df_in[\"Atlas_Y\"].divide(res[1]).astype(int),\n df_in[\"Atlas_Z\"].divide(res[2]).astype(int),\n )\n # convert i, j, k indices in raveled indices\n linear_indices = np.ravel_multi_index(ixyz, dims=atlas.annotation.shape)\n # get the structure id from the annotation stack\n idlist = atlas.annotation.ravel()[linear_indices]\n # replace 0 which does not exist to 997 (root)\n idlist[idlist == 0] = 997\n\n # query the corresponding acronyms\n lookup = atlas.lookup_df.set_index(\"id\")\n df.loc[:, col] = lookup.loc[idlist, \"acronym\"].values\n\n return df\n
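A usage sketch, assuming the `allen_mouse_25um` BrainGlobe atlas is installed locally (any registered atlas name works) and that the DataFrame holds coordinates in microns:

```python
import pandas as pd
from brainglobe_atlasapi import BrainGlobeAtlas
from cuisto import utils

atlas = BrainGlobeAtlas("allen_mouse_25um")
df = pd.DataFrame({"Atlas_X": [5400.0], "Atlas_Y": [3000.0], "Atlas_Z": [5700.0]})
df = utils.add_brain_region(df, atlas, col="Parent")  # adds the acronym column
```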
"},{"location":"api-utils.html#cuisto.utils.add_channel","title":"add_channel(df, object_type, channel_names)
","text":"Add channel as a measurement for detections DataFrame.
The channel is read from the Classification column, which must be formatted as \"object_type: channel\".
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame with detections measurements.
requiredobject_type
str
Object type (primary classification).
requiredchannel_names
dict
Map between original channel names to something else.
requiredReturns:
Type DescriptionDataFrame
Same DataFrame with a \"channel\" column.
Source code incuisto/utils.py
def add_channel(\n df: pd.DataFrame, object_type: str, channel_names: dict\n) -> pd.DataFrame:\n \"\"\"\n Add channel as a measurement for detections DataFrame.\n\n The channel is read from the Classification column, the latter having to be\n formatted as \"object_type: channel\".\n\n Parameters\n ----------\n df : pd.DataFrame\n DataFrame with detections measurements.\n object_type : str\n Object type (primary classification).\n channel_names : dict\n Map between original channel names to something else.\n\n Returns\n -------\n pd.DataFrame\n Same DataFrame with a \"channel\" column.\n\n \"\"\"\n # check if there is something to do\n if \"channel\" in df.columns:\n return df\n\n kind = get_df_kind(df)\n if kind == \"annotation\":\n warnings.warn(\"Annotation DataFrame not supported.\")\n return df\n\n # add channel, from {class_name: channel} classification\n df[\"channel\"] = (\n df[\"Classification\"].str.replace(object_type + \": \", \"\").map(channel_names)\n )\n\n return df\n
"},{"location":"api-utils.html#cuisto.utils.add_hemisphere","title":"add_hemisphere(df, hemisphere_names, midline=5700, col='Atlas_Z', atlas_type='brain')
","text":"Add hemisphere (left/right) as a measurement for detections or annotations.
The hemisphere is read in the \"Classification\" column for annotations. The latter needs to be in the form \"Right: Name\" or \"Left: Name\". For detections, the input col
of df
is compared to midline
to assess if the object belongs to the left or right hemisphere.
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame with detections or annotations measurements.
requiredhemisphere_names
dict
Map between \"Left\" and \"Right\" to something else.
requiredmidline
float
Used only for \"detections\" df
. Corresponds to the brain midline in microns, should be 5700 for CCFv3 and 1610 for spinal cord.
5700
col
str
Name of the column containing the Z coordinate (medio-lateral) in microns. Default is \"Atlas_Z\".
'Atlas_Z'
atlas_type
(brain, cord)
Type of atlas used for registration. Required because the brain atlas is swapped between left and right while the spinal cord atlas is not. Default is \"brain\".
\"brain\"
Returns:
Name Type Descriptiondf
DataFrame
The same DataFrame with a new \"hemisphere\" column
Source code incuisto/utils.py
def add_hemisphere(\n df: pd.DataFrame,\n hemisphere_names: dict,\n midline: float = 5700,\n col: str = \"Atlas_Z\",\n atlas_type: str = \"brain\",\n) -> pd.DataFrame:\n \"\"\"\n Add hemisphere (left/right) as a measurement for detections or annotations.\n\n The hemisphere is read in the \"Classification\" column for annotations. The latter\n needs to be in the form \"Right: Name\" or \"Left: Name\". For detections, the input\n `col` of `df` is compared to `midline` to assess if the object belong to the left or\n right hemispheres.\n\n Parameters\n ----------\n df : pandas.DataFrame\n DataFrame with detections or annotations measurements.\n hemisphere_names : dict\n Map between \"Left\" and \"Right\" to something else.\n midline : float\n Used only for \"detections\" `df`. Corresponds to the brain midline in microns,\n should be 5700 for CCFv3 and 1610 for spinal cord.\n col : str, optional\n Name of the column containing the Z coordinate (medio-lateral) in microns.\n Default is \"Atlas_Z\".\n atlas_type : {\"brain\", \"cord\"}, optional\n Type of atlas used for registration. Required because the brain atlas is swapped\n between left and right while the spinal cord atlas is not. Default is \"brain\".\n\n Returns\n -------\n df : pandas.DataFrame\n The same DataFrame with a new \"hemisphere\" column\n\n \"\"\"\n # check if there is something to do\n if \"hemisphere\" in df.columns:\n return df\n\n # get kind of DataFrame\n kind = get_df_kind(df)\n\n if kind == \"detection\":\n # use midline\n if atlas_type == \"brain\":\n # brain atlas : beyond midline, it's left\n df.loc[df[col] >= midline, \"hemisphere\"] = hemisphere_names[\"Left\"]\n df.loc[df[col] < midline, \"hemisphere\"] = hemisphere_names[\"Right\"]\n elif atlas_type == \"cord\":\n # cord atlas : below midline, it's left\n df.loc[df[col] <= midline, \"hemisphere\"] = hemisphere_names[\"Left\"]\n df.loc[df[col] > midline, \"hemisphere\"] = hemisphere_names[\"Right\"]\n\n elif kind == \"annotation\":\n # use Classification name -- this does not depend on atlas type\n df[\"hemisphere\"] = [name.split(\":\")[0] for name in df[\"Classification\"]]\n df[\"hemisphere\"] = df[\"hemisphere\"].map(hemisphere_names)\n\n return df\n
"},{"location":"api-utils.html#cuisto.utils.ccf_to_stereo","title":"ccf_to_stereo(x_ccf, y_ccf, z_ccf=0)
","text":"Convert X, Y, Z coordinates in CCFv3 to stereotaxis coordinates (as in Paxinos-Franklin atlas).
Coordinates are shifted, rotated and squeezed, see (1) for more info. Input must be in mm. x_ccf
corresponds to the anterio-posterior (rostro-caudal) axis. y_ccf
corresponds to the dorso-ventral axis. z_ccf
corresponds to the medio-lateral (left-right) axis.
Warning : it is a rough estimation.
(1) https://community.brain-map.org/t/how-to-transform-ccf-x-y-z-coordinates-into-stereotactic-coordinates/1858
Parameters:
Name Type Description Defaultx_ccf
floats or ndarray
Coordinates in CCFv3 space in mm.
requiredy_ccf
floats or ndarray
Coordinates in CCFv3 space in mm.
requiredz_ccf
float or ndarray
Coordinate in CCFv3 space in mm. Default is 0.
0
Returns:
Type Descriptionap, dv, ml : floats or np.ndarray
Stereotaxic coordinates in mm.
Source code incuisto/utils.py
def ccf_to_stereo(\n x_ccf: float | np.ndarray, y_ccf: float | np.ndarray, z_ccf: float | np.ndarray = 0\n) -> tuple:\n \"\"\"\n Convert X, Y, Z coordinates in CCFv3 to stereotaxis coordinates (as in\n Paxinos-Franklin atlas).\n\n Coordinates are shifted, rotated and squeezed, see (1) for more info. Input must be\n in mm.\n `x_ccf` corresponds to the anterio-posterior (rostro-caudal) axis.\n `y_ccf` corresponds to the dorso-ventral axis.\n `z_ccf` corresponds to the medio-lateral axis (left-right) axis.\n\n Warning : it is a rough estimation.\n\n (1) https://community.brain-map.org/t/how-to-transform-ccf-x-y-z-coordinates-into-stereotactic-coordinates/1858\n\n Parameters\n ----------\n x_ccf, y_ccf : floats or np.ndarray\n Coordinates in CCFv3 space in mm.\n z_ccf : float or np.ndarray, optional\n Coordinate in CCFv3 space in mm. Default is 0.\n\n Returns\n -------\n ap, dv, ml : floats or np.ndarray\n Stereotaxic coordinates in mm.\n\n \"\"\"\n # Center CCF on Bregma\n xstereo = -(x_ccf - 5.40) # anterio-posterior coordinate (rostro-caudal)\n ystereo = y_ccf - 0.44 # dorso-ventral coordinate\n ml = z_ccf - 5.70 # medio-lateral coordinate (left-right)\n\n # Rotate CCF of 5\u00b0\n angle = np.deg2rad(5)\n ap = xstereo * np.cos(angle) - ystereo * np.sin(angle)\n dv = xstereo * np.sin(angle) + ystereo * np.cos(angle)\n\n # Squeeze the dorso-ventral axis by 94.34%\n dv *= 0.9434\n\n return ap, dv, ml\n
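As a sanity check, Bregma itself, which is (5.40, 0.44, 5.70) mm in CCFv3 with this convention, maps to the stereotaxic origin:

```python
from cuisto import utils

ap, dv, ml = utils.ccf_to_stereo(5.40, 0.44, 5.70)
# ap == dv == ml == 0.0
```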
"},{"location":"api-utils.html#cuisto.utils.filter_df_classifications","title":"filter_df_classifications(df, filter_list, mode='keep', col='Classification')
","text":"Filter a DataFrame whether specified col
column entries contain elements in filter_list
. Case insensitive.
If mode
is \"keep\", keep entries only if their col
is in the list (default). If mode
is \"remove\", remove entries if their col
is in the list.
Parameters:
Name Type Description Defaultdf
DataFrame
required filter_list
list | tuple | str
List of words that should be present to trigger the filter.
requiredmode
keep or remove
Keep or remove entries from the list. Default is \"keep\".
'keep'
col
str
Key in df
. Default is \"Classification\".
'Classification'
Returns:
Type DescriptionDataFrame
Filtered DataFrame.
Source code incuisto/utils.py
def filter_df_classifications(\n df: pd.DataFrame, filter_list: list | tuple | str, mode=\"keep\", col=\"Classification\"\n) -> pd.DataFrame:\n \"\"\"\n Filter a DataFrame whether specified `col` column entries contain elements in\n `filter_list`. Case insensitive.\n\n If `mode` is \"keep\", keep entries only if their `col` in is in the list (default).\n If `mode` is \"remove\", remove entries if their `col` is in the list.\n\n Parameters\n ----------\n df : pd.DataFrame\n filter_list : list | tuple | str\n List of words that should be present to trigger the filter.\n mode : \"keep\" or \"remove\", optional\n Keep or remove entries from the list. Default is \"keep\".\n col : str, optional\n Key in `df`. Default is \"Classification\".\n\n Returns\n -------\n pd.DataFrame\n Filtered DataFrame.\n\n \"\"\"\n # check input\n if isinstance(filter_list, str):\n filter_list = [filter_list] # make sure it is a list\n\n if col not in df.columns:\n # might be because of 'Classification' instead of 'classification'\n col = col.capitalize()\n if col not in df.columns:\n raise KeyError(f\"{col} not in DataFrame.\")\n\n pattern = \"|\".join(f\".*{s}.*\" for s in filter_list)\n\n if mode == \"keep\":\n df_return = df[df[col].str.contains(pattern, case=False, regex=True)]\n elif mode == \"remove\":\n df_return = df[~df[col].str.contains(pattern, case=False, regex=True)]\n\n # check\n if len(df_return) == 0:\n raise ValueError(\n (\n f\"Filtering '{col}' with {filter_list} resulted in an\"\n + \" empty DataFrame, check your config file.\"\n )\n )\n return df_return\n
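For instance, keeping only detections whose classification mentions a given channel (made-up data; matching is case-insensitive and substring-based):

```python
import pandas as pd
from cuisto import utils

df = pd.DataFrame(
    {"Classification": ["Fibers: EGFP", "Fibers: DsRed", "Cells: EGFP"]}
)
kept = utils.filter_df_classifications(df, "egfp", mode="keep")
# keeps "Fibers: EGFP" and "Cells: EGFP"
```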
"},{"location":"api-utils.html#cuisto.utils.filter_df_regions","title":"filter_df_regions(df, filter_list, mode='keep', col='Parent')
","text":"Filters entries in df
based on whether their col
is in filter_list
or not.
If mode
is \"keep\", keep entries only if their col
is in the list (default). If mode
is \"remove\", remove entries if their col
is in the list.
Parameters:
Name Type Description Defaultdf
DataFrame
required filter_list
list - like
List of regions to keep or remove from the DataFrame.
requiredmode
keep or remove
Keep or remove entries from the list. Default is \"keep\".
'keep'
col
str
Key in df
. Default is \"Parent\".
'Parent'
Returns:
Name Type Descriptiondf
DataFrame
Filtered DataFrame.
Source code incuisto/utils.py
def filter_df_regions(\n df: pd.DataFrame, filter_list: list | tuple, mode=\"keep\", col=\"Parent\"\n) -> pd.DataFrame:\n \"\"\"\n Filters entries in `df` based on wether their `col` is in `filter_list` or not.\n\n If `mode` is \"keep\", keep entries only if their `col` in is in the list (default).\n If `mode` is \"remove\", remove entries if their `col` is in the list.\n\n Parameters\n ----------\n df : pandas.DataFrame\n filter_list : list-like\n List of regions to keep or remove from the DataFrame.\n mode : \"keep\" or \"remove\", optional\n Keep or remove entries from the list. Default is \"keep\".\n col : str, optional\n Key in `df`. Default is \"Parent\".\n\n Returns\n -------\n df : pandas.DataFrame\n Filtered DataFrame.\n\n \"\"\"\n\n if mode == \"keep\":\n return df[df[col].isin(filter_list)]\n if mode == \"remove\":\n return df[~df[col].isin(filter_list)]\n
"},{"location":"api-utils.html#cuisto.utils.get_blacklist","title":"get_blacklist(file, atlas)
","text":"Build a list of regions to exclude from file.
File must be a TOML with [WITH_CHILDS] and [EXACT] sections.
Parameters:
Name Type Description Defaultfile
str
Full path the atlas_blacklist.toml file.
requiredatlas
BrainGlobeAtlas
Atlas to extract regions from.
requiredReturns:
Name Type Descriptionblack_list
list
Full list of acronyms to discard.
Source code incuisto/utils.py
def get_blacklist(file: str, atlas: BrainGlobeAtlas) -> list:\n \"\"\"\n Build a list of regions to exclude from file.\n\n File must be a TOML with [WITH_CHILDS] and [EXACT] sections.\n\n Parameters\n ----------\n file : str\n Full path the atlas_blacklist.toml file.\n atlas : BrainGlobeAtlas\n Atlas to extract regions from.\n\n Returns\n -------\n black_list : list\n Full list of acronyms to discard.\n\n \"\"\"\n with open(file, \"rb\") as fid:\n content = tomllib.load(fid)\n\n blacklist = [] # init. the list\n\n # add regions and their descendants\n for region in content[\"WITH_CHILDS\"][\"members\"]:\n blacklist.extend(\n [\n atlas.structures[id][\"acronym\"]\n for id in atlas.structures.tree.expand_tree(\n atlas.structures[region][\"id\"]\n )\n ]\n )\n\n # add regions specified exactly (no descendants)\n blacklist.extend(content[\"EXACT\"][\"members\"])\n\n return blacklist\n
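A sketch of the expected file structure, parsed with the same `tomllib` call the function uses (the region acronyms below are examples):

```python
import tomllib

example = """
[WITH_CHILDS]
members = ["fiber tracts", "VS"]

[EXACT]
members = ["root"]
"""
content = tomllib.loads(example)
# WITH_CHILDS members are expanded to themselves plus all their descendants
# in the atlas hierarchy; EXACT members are appended as-is.
```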
"},{"location":"api-utils.html#cuisto.utils.get_data_coverage","title":"get_data_coverage(df, col='Atlas_AP', by='animal')
","text":"Get min and max in col
for each by
.
Used to get data coverage for each animal to plot in distributions.
Parameters:
Name Type Description Defaultdf
DataFrame
description
requiredcol
str
Key in df
, default is \"Atlas_X\".
'Atlas_AP'
by
str
Key in df
, default is \"animal\".
'animal'
Returns:
Type DescriptionDataFrame
min and max of col
for each by
, named \"X_min\", and \"X_max\".
cuisto/utils.py
def get_data_coverage(df: pd.DataFrame, col=\"Atlas_AP\", by=\"animal\") -> pd.DataFrame:\n \"\"\"\n Get min and max in `col` for each `by`.\n\n Used to get data coverage for each animal to plot in distributions.\n\n Parameters\n ----------\n df : pd.DataFrame\n _description_\n col : str, optional\n Key in `df`, default is \"Atlas_X\".\n by : str, optional\n Key in `df` , default is \"animal\".\n\n Returns\n -------\n pd.DataFrame\n min and max of `col` for each `by`, named \"X_min\", and \"X_max\".\n\n \"\"\"\n df_group = df.groupby([by])\n return pd.DataFrame(\n [\n df_group[col].min(),\n df_group[col].max(),\n ],\n index=[\"X_min\", \"X_max\"],\n )\n
"},{"location":"api-utils.html#cuisto.utils.get_df_kind","title":"get_df_kind(df)
","text":"Get DataFrame kind, eg. Annotations or Detections.
It is based on reading the Object Type of the first entry, so the DataFrame must have only one kind of object.
Parameters:
Name Type Description Defaultdf
DataFrame
required Returns:
Name Type Descriptionkind
str
\"detection\" or \"annotation\".
Source code incuisto/utils.py
def get_df_kind(df: pd.DataFrame) -> str:\n \"\"\"\n Get DataFrame kind, eg. Annotations or Detections.\n\n It is based on reading the Object Type of the first entry, so the DataFrame must\n have only one kind of object.\n\n Parameters\n ----------\n df : pandas.DataFrame\n\n Returns\n -------\n kind : str\n \"detection\" or \"annotation\".\n\n \"\"\"\n return df[\"Object type\"].iloc[0].lower()\n
"},{"location":"api-utils.html#cuisto.utils.get_injection_site","title":"get_injection_site(animal, info_file, channel, stereo=False)
","text":"Get the injection site coordinates associated with animal.
Parameters:
Name Type Description Defaultanimal
str
Animal ID.
requiredinfo_file
str
Path to TOML info file.
requiredchannel
str
Channel ID as in the TOML file.
requiredstereo
bool
Wether to convert coordinates in stereotaxis coordinates. Default is False.
False
Returns:
Type Descriptionx, y, z : floats
Injection site coordinates.
Source code incuisto/utils.py
def get_injection_site(\n animal: str, info_file: str, channel: str, stereo: bool = False\n) -> tuple:\n \"\"\"\n Get the injection site coordinates associated with animal.\n\n Parameters\n ----------\n animal : str\n Animal ID.\n info_file : str\n Path to TOML info file.\n channel : str\n Channel ID as in the TOML file.\n stereo : bool, optional\n Wether to convert coordinates in stereotaxis coordinates. Default is False.\n\n Returns\n -------\n x, y, z : floats\n Injection site coordinates.\n\n \"\"\"\n with open(info_file, \"rb\") as fid:\n info = tomllib.load(fid)\n\n if channel in info[animal]:\n x, y, z = info[animal][channel][\"injection_site\"]\n if stereo:\n x, y, z = ccf_to_stereo(x, y, z)\n else:\n x, y, z = None, None, None\n\n return x, y, z\n
"},{"location":"api-utils.html#cuisto.utils.get_leaves_list","title":"get_leaves_list(atlas)
","text":"Get the list of leaf brain regions.
Leaf brain regions are defined as regions without children, i.e. regions that are at the bottom of the hierarchy.
Parameters:
Name Type Description Defaultatlas
BrainGlobeAtlas
Atlas to extract regions from.
requiredReturns:
Name Type Descriptionleaves_list
list
Acronyms of leaf brain regions.
Source code incuisto/utils.py
def get_leaves_list(atlas: BrainGlobeAtlas) -> list:\n \"\"\"\n Get the list of leaf brain regions.\n\n Leaf brain regions are defined as regions without childs, eg. regions that are at\n the bottom of the hiearchy.\n\n Parameters\n ----------\n atlas : BrainGlobeAtlas\n Atlas to extract regions from.\n\n Returns\n -------\n leaves_list : list\n Acronyms of leaf brain regions.\n\n \"\"\"\n leaves_list = []\n for region in atlas.structures_list:\n if atlas.structures.tree[region[\"id\"]].is_leaf():\n leaves_list.append(region[\"acronym\"])\n\n return leaves_list\n
"},{"location":"api-utils.html#cuisto.utils.get_mapping_fusion","title":"get_mapping_fusion(fusion_file)
","text":"Get mapping dictionnary between input brain regions and new regions defined in atlas_fusion.toml
file.
The returned dictionary can be used in DataFrame.replace().
Parameters:
Name Type Description Defaultfusion_file
str
Path to the TOML file with the merging rules.
requiredReturns:
Name Type Descriptionm
dict
Mapping as {old: new}.
Source code incuisto/utils.py
def get_mapping_fusion(fusion_file: str) -> dict:\n \"\"\"\n Get mapping dictionnary between input brain regions and new regions defined in\n `atlas_fusion.toml` file.\n\n The returned dictionnary can be used in DataFrame.replace().\n\n Parameters\n ----------\n fusion_file : str\n Path to the TOML file with the merging rules.\n\n Returns\n -------\n m : dict\n Mapping as {old: new}.\n\n \"\"\"\n with open(fusion_file, \"rb\") as fid:\n df = pd.DataFrame.from_dict(tomllib.load(fid), orient=\"index\").set_index(\n \"acronym\"\n )\n\n return (\n df.drop(columns=\"name\")[\"members\"]\n .explode()\n .reset_index()\n .set_index(\"members\")\n .to_dict()[\"acronym\"]\n )\n
"},{"location":"api-utils.html#cuisto.utils.get_starter_cells","title":"get_starter_cells(animal, channel, info_file)
","text":"Get the number of starter cells associated with animal.
Parameters:
Name Type Description Defaultanimal
str
Animal ID.
requiredchannel
str
Channel ID.
requiredinfo_file
str
Path to TOML info file.
requiredReturns:
Name Type Descriptionn_starters
int
Number of starter cells.
Source code incuisto/utils.py
def get_starter_cells(animal: str, channel: str, info_file: str) -> int:\n \"\"\"\n Get the number of starter cells associated with animal.\n\n Parameters\n ----------\n animal : str\n Animal ID.\n channel : str\n Channel ID.\n info_file : str\n Path to TOML info file.\n\n Returns\n -------\n n_starters : int\n Number of starter cells.\n\n \"\"\"\n with open(info_file, \"rb\") as fid:\n info = tomllib.load(fid)\n\n return info[animal][channel][\"starter_cells\"]\n
"},{"location":"api-utils.html#cuisto.utils.merge_regions","title":"merge_regions(df, col, fusion_file)
","text":"Merge brain regions following rules in the fusion_file.toml
file.
Apply this merging on the col
column of the input DataFrame. Values of col
found in a members
section of the file will be changed to the new acronym.
Parameters:
Name Type Description Defaultdf
DataFrame
required col
str
Column of df
on which to apply the mapping.
fusion_file
str
Path to the toml file with the merging rules.
requiredReturns:
Name Type Descriptiondf
DataFrame
Same DataFrame with regions renamed.
Source code incuisto/utils.py
def merge_regions(df: pd.DataFrame, col: str, fusion_file: str) -> pd.DataFrame:\n \"\"\"\n Merge brain regions following rules in the `fusion_file.toml` file.\n\n Apply this merging on `col` of the input DataFrame. Values of `col` that are found\n in a `members` section of the file will be changed to the new acronym.\n\n Parameters\n ----------\n df : pandas.DataFrame\n col : str\n Column of `df` on which to apply the mapping.\n fusion_file : str\n Path to the toml file with the merging rules.\n\n Returns\n -------\n df : pandas.DataFrame\n Same DataFrame with regions renamed.\n\n \"\"\"\n df[col] = df[col].replace(get_mapping_fusion(fusion_file))\n\n return df\n
"},{"location":"api-utils.html#cuisto.utils.renormalize_per_key","title":"renormalize_per_key(df, by, on)
","text":"Renormalize on
column by its sum for each by
.
Use case : relative density is computed for both hemispheres, so if one wants to plot only one hemisphere, the sum of the bars corresponding to one channel (by
) should be 1. So :
df = df[df[\"hemisphere\"] == \"Ipsi.\"] df = renormalize_per_key(df, \"channel\", \"relative density\") Then, the sum of \"relative density\" for each \"channel\" equals 1.
Parameters:
Name Type Description Defaultdf
DataFrame
required by
str
Key in df
. df
is normalized for each by
.
on
str
Key in df
. Measurement to be normalized.
Returns:
Name Type Descriptiondf
DataFrame
Same DataFrame with normalized on
column.
cuisto/utils.py
def renormalize_per_key(df: pd.DataFrame, by: str, on: str):\n \"\"\"\n Renormalize `on` column by its sum for each `by`.\n\n Use case : relative density is computed for both hemispheres, so if one wants to\n plot only one hemisphere, the sum of the bars corresponding to one channel (`by`)\n should be 1. So :\n >>> df = df[df[\"hemisphere\"] == \"Ipsi.\"]\n >>> df = renormalize_per_key(df, \"channel\", \"relative density\")\n Then, the sum of \"relative density\" for each \"channel\" equals 1.\n\n Parameters\n ----------\n df : pd.DataFrame\n by : str\n Key in `df`. `df` is normalized for each `by`.\n on : str\n Key in `df`. Measurement to be normalized.\n\n Returns\n -------\n df : pd.DataFrame\n Same DataFrame with normalized `on` column.\n\n \"\"\"\n norm = df.groupby(by)[on].sum()\n bys = df[by].unique()\n for key in bys:\n df.loc[df[by] == key, on] = df.loc[df[by] == key, on].divide(norm[key])\n\n return df\n
"},{"location":"api-utils.html#cuisto.utils.select_hemisphere_channel","title":"select_hemisphere_channel(df, hue, hue_filter, hue_mirror)
","text":"Select relevant data given hue and filters.
Returns the DataFrame containing only the data to be used.
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame to filter.
requiredhue
(hemisphere, channel)
hue that will be used in seaborn plots.
\"hemisphere\"
hue_filter
str
Selected data.
requiredhue_mirror
bool
Instead of keeping only hue_filter values, they will be plotted in mirror.
requiredReturns:
Name Type Descriptiondfplt
DataFrame
DataFrame to be used in plots.
Source code incuisto/utils.py
def select_hemisphere_channel(\n df: pd.DataFrame, hue: str, hue_filter: str, hue_mirror: bool\n) -> pd.DataFrame:\n \"\"\"\n Select relevant data given hue and filters.\n\n Returns the DataFrame with only things to be used.\n\n Parameters\n ----------\n df : pd.DataFrame\n DataFrame to filter.\n hue : {\"hemisphere\", \"channel\"}\n hue that will be used in seaborn plots.\n hue_filter : str\n Selected data.\n hue_mirror : bool\n Instead of keeping only hue_filter values, they will be plotted in mirror.\n\n Returns\n -------\n dfplt : pd.DataFrame\n DataFrame to be used in plots.\n\n \"\"\"\n dfplt = df.copy()\n\n if hue == \"hemisphere\":\n # hue_filter is used to select channels\n # keep only left and right hemispheres, not \"both\"\n dfplt = dfplt[dfplt[\"hemisphere\"] != \"both\"]\n if hue_filter == \"all\":\n hue_filter = dfplt[\"channel\"].unique()\n elif not isinstance(hue_filter, (list, tuple)):\n # it is allowed to select several channels so handle lists\n hue_filter = [hue_filter]\n dfplt = dfplt[dfplt[\"channel\"].isin(hue_filter)]\n elif hue == \"channel\":\n # hue_filter is used to select hemispheres\n # it can only be left, right, both or empty\n if hue_filter == \"both\":\n # handle if it's a coordinates DataFrame which doesn't have \"both\"\n if \"both\" not in dfplt[\"hemisphere\"].unique():\n # keep both hemispheres, don't do anything\n pass\n else:\n if hue_mirror:\n # we need to keep both hemispheres to plot them in mirror\n dfplt = dfplt[dfplt[\"hemisphere\"] != \"both\"]\n else:\n # we keep the metrics computed in both hemispheres\n dfplt = dfplt[dfplt[\"hemisphere\"] == \"both\"]\n else:\n # hue_filter should correspond to an hemisphere name\n dfplt = dfplt[dfplt[\"hemisphere\"] == hue_filter]\n else:\n # not handled. Just return the DataFrame without filtering, maybe it'll make\n # sense.\n warnings.warn(f\"{hue} should be 'channel' or 'hemisphere'.\")\n\n # check result\n if len(dfplt) == 0:\n warnings.warn(\n f\"hue={hue} and hue_filter={hue_filter} resulted in an empty subset.\"\n )\n\n return dfplt\n
"},{"location":"guide-create-pyramids.html","title":"Create pyramidal OME-TIFF","text":"This page will guide you to use the pyramid-creator
package, in the event the CZI file does not work directly in QuPath. The script will generate pyramids from OME-TIFF files exported from ZEN.
Tip
pyramid-creator
can also pyramidalize images using Python only with the --no-use-qupath
option.
This Python script uses QuPath under the hood, via a companion script called createPyramids.groovy
. It will find the OME-TIFF files and make QuPath run the groovy script on it, in console mode (without graphical user interface).
This script is standalone, i.e. it does not rely on the cuisto
package. But installing the latter makes sure all dependencies are installed (namely typer
and tqdm
with the QuPath backend and quite a few more for the Python backend).
pyramid-creator
moved to a standalone package that you can find here with installation and usage instructions.
You will find instructions on the dedicated project page over at Github.
For reference :
You will need conda
, follow those instructions to install it.
Then, create a virtual environment if you didn't already (pyramid-creator
can be installed in the environment for cuisto
) and install the pyramid-creator
package.
conda create -c conda-forge -n cuisto-env python=3.12 # not required if you already create an environment\nconda activate cuisto-env\npip install pyramid-creator\n
To use the Python backend (with tifffile
), replace the last line with : pip install pyramid-creator[python-backend]\n
To use the QuPath backend, a working QuPath installation is required, and the pyramid-creator
command needs to be aware of its location. To do so, first, install QuPath. By default, it will install in ~\\AppData\\QuPath-0.X.Y
. In any case, note down the installation location.
Then, you have several options : - Create a file in your user directory called \"QUPATH_PATH\" (without extension), containing the full path to the QuPath console executable. In my case, it reads : C:\\Users\\glegoc\\AppData\\Local\\QuPath-0.5.1\\QuPath-0.5.1 (console).exe
. Then, the pyramid-creator
script will read this file to find the QuPath executable. - Specify the QuPath path as an option when calling the command line interface (see the Usage section) :
pyramid-creator /path/to/your/images --qupath-path \"C:\\Users\\glegoc\\AppData\\Local\\QuPath-0.5.1\\QuPath-0.5.1 (console).exe\"\n
- Specify the QuPath path as an option when using the package in a Python script (see the Usage section) : from pyramid_creator import pyramidalize_directory\npyramidalize_directory(\"/path/to/your/images/\", qupath_path=\"C:\\Users\\glegoc\\AppData\\Local\\QuPath-0.5.1\\QuPath-0.5.1 (console).exe\")\n
- If you're using Windows, using QuPath v0.6.0, v0.5.1 or v0.5.0 and chose the default installation location, pyramid-creator
should find it automatically and write it down in the \"QUPATH_PATH\" file by itself."},{"location":"guide-create-pyramids.html#export-czi-to-ome-tiff","title":"Export CZI to OME-TIFF","text":"OME-TIFF is a specification of the TIFF image format. It specifies how the metadata should be written to the file to be interoperable across different software. ZEN can export to OME-TIFF so you don't need to pay attention to metadata. Therefore, you won't need to specify pixel size and channel names and colors as they will be read directly from the OME-TIFF files.
The OME-TIFF files should be ready to be pyramidalized with the create_pyramids.py
script.
See the instructions on the dedicated project page over at Github.
"},{"location":"guide-install-abba.html","title":"Install ABBA","text":"You can head to the ABBA documentation for installation instructions. You'll see that a Windows installer is available. While it might be working great, I prefer to do it manually step-by-step to make sure everything is going well.
You will find below installation instructions for the regular ABBA Fiji plugin, which proposes only the mouse and rat brain atlases. To be able to use the Brainglobe atlases, you will need the Python version. The two can be installed alongside each other.
"},{"location":"guide-install-abba.html#abba-fiji","title":"ABBA Fiji","text":""},{"location":"guide-install-abba.html#install-fiji","title":"Install Fiji","text":"Install the \"batteries-included\" distribution of ImageJ, Fiji, from the official website.
Warning
Extract Fiji somewhere you have write access, otherwise Fiji will not be able to download and install plugins. In other words, put the folder in your User directory and not in C:\\, C:\\Program Files and the like.
We need to add the PTBIOP update site, managed by the bio-imaging and optics facility at EPFL, that contains the ABBA plugin.
Help > Update
... Manage Update Sites
. Look up PTBIOP
, and click on the check box. Apply and Close
, and Apply Changes
. This will download and install the required plugins. Restart ImageJ as suggested. Plugins > BIOP > Atlas > ABBA - ABBA start
, or simply type abba start
in the search box. Choose the \"Adult Mouse Brain - Allen Brain Atlas V3p1\". It will download this atlas and might take a while, depending on your Internet connection.ABBA can leverage the elastix toolbox for automatic 2D in-plane registration.
ABBA should be installed and functional ! You can check the official documentation for usage instructions and some tips here.
"},{"location":"guide-install-abba.html#abba-python","title":"ABBA Python","text":"Brainglobe is an initiative aiming at providing interoperable, model-agnostic Python-based tools for neuroanatomy. They package various published volumetric anatomical atlases of different species (check the list), including the Allen Mouse brain atlas (CCFv3, ref.) and a 3D version of the Allen mouse spinal cord atlas (ref).
To be able to leverage those atlases, we need to make ImageJ and Python able to talk to each other. This is the purpose of abba_python, which will install ImageJ and its ABBA plugins inside a Python environment, with bindings between the two worlds.
"},{"location":"guide-install-abba.html#install-conda","title":"Installconda
","text":"If not done already, follow those instructions to install conda
.
conda create -c conda-forge -n abba_python python=3.10 openjdk=11 maven pyimagej notebook\n
pip install abba-python==0.9.6.dev0\n
conda activate abba_python\n
brainglobe install -a allen_cord_20um\n
ipython\n
You should see the IPython prompt, which looks like this : In [1]:\n
from abba_python import abba\nabba.start_imagej()\n
The first launch needs to initialize ImageJ and install all required plugins, which takes a while (>5min).Tip
Afterwards, to launch ImageJ from Python and do some registration work, you just need to launch a terminal (PowerShell), and do steps 4., 6., and 7.
"},{"location":"guide-install-abba.html#install-the-automatic-registration-tools_1","title":"Install the automatic registration tools","text":"You can follow the same instructions as the regular Fiji version. You can do it from either the \"normal\" Fiji or the ImageJ instance launched from Python, they share the same configuration files. Therefore, if you already did it in regular Fiji, elastix should already be set up and ready to use in ImageJ from Python.
"},{"location":"guide-install-abba.html#troubleshooting","title":"Troubleshooting","text":""},{"location":"guide-install-abba.html#java_home-errors","title":"JAVA_HOME errors","text":"Unfortunately on some computers, Python does not find the Java virtual machine even though it should have been installed when installing OpenJDK with conda. This will result in an error mentionning \"java.dll\" and suggesting to check the JAVA_HOME
environment variable.
The only fix I could find is to install Java system-wide. You can grab a (free) installer on Adoptium, choosing JRE 17.X for your platform. During the installation :
Restart the terminal and try again. Now, ImageJ should use the system-wide Java and it should work.
"},{"location":"guide-install-abba.html#abba-qupath-extension","title":"ABBA QuPath extension","text":"To import registered regions in your QuPath project and be able to convert objects' coordinates in atlas space, the ABBA QuPath extension is required.
Edit > Preferences
. In the Extension
tab, set your QuPath user directory
to a local directory (usually C:\\Users\\USERNAME\\QuPath\\v0.X.Y
).extensions
in your QuPath user directory.qupath-extension-abba-x.y.z.zip
).extensions
folder in your QuPath user directory.Extensions
, you should have an ABBA
entry.While you can use QuPath and cuisto
functionalities as you see fit, there exists a pipeline version of those. It requires a specific structure to store files (so that the different scripts know where to look for data). It also requires that you have detections stored as geojson files, which can be achieved using a pixel classifier and further segmentation (see here) for example.
This is especially useful to perform quantification for several animals at once, where you'll only need to specify the root directory and the animal identifiers that should be pooled together, instead of having to manually specify each detections and annotations file.
Three main scripts and function are used within the pipeline :
exportPixelClassifierProbabilities.groovy
to create prediction maps of objects of interestsegment_image.py
to segment those maps and create geojson files to be imported back to QuPath as detectionspipelineImportExport.groovy
to :$folderPrefix$segmentation/$segTag$/geojson
)Following a specific directory structure ensures subsequent scripts and functions can find required files. The good news is that this structure will mostly be created automatically using the segmentation scripts (from QuPath and Python), as long as you stay consistent filling the parameters of each script. The structure expected by the groovy all-in-one script and cuisto
batch-process function is the following :
some_directory/\n \u251c\u2500\u2500AnimalID0/ \n \u2502 \u251c\u2500\u2500 animalid0_qupath/\n \u2502 \u2514\u2500\u2500 animalid0_segmentation/ \n \u2502 \u2514\u2500\u2500 segtag/ \n \u2502 \u251c\u2500\u2500 annotations/ \n \u2502 \u251c\u2500\u2500 detections/ \n \u2502 \u251c\u2500\u2500 geojson/ \n \u2502 \u2514\u2500\u2500 probabilities/ \n \u251c\u2500\u2500AnimalID1/ \n \u2502 \u251c\u2500\u2500 animalid1_qupath/\n \u2502 \u2514\u2500\u2500 animalid1_segmentation/ \n \u2502 \u2514\u2500\u2500 segtag/ \n \u2502 \u251c\u2500\u2500 annotations/ \n \u2502 \u251c\u2500\u2500 detections/ \n \u2502 \u251c\u2500\u2500 geojson/ \n \u2502 \u2514\u2500\u2500 probabilities/ \n
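As a sketch (the root directory, animal IDs and segmentation tag below are placeholders), this layout can be pre-created from Python :
from pathlib import Path\n\nroot = Path(\"some_directory\")  # placeholder root directory\nsegtag = \"cells\"  # placeholder segmentation type\nfor animal in (\"animalid0\", \"animalid1\"):  # placeholder animal IDs, lower case\n    animal_dir = root / animal.capitalize()  # the animal folder itself can be named anything\n    (animal_dir / f\"{animal}_qupath\").mkdir(parents=True, exist_ok=True)\n    for sub in (\"annotations\", \"detections\", \"geojson\", \"probabilities\"):\n        (animal_dir / f\"{animal}_segmentation\" / segtag / sub).mkdir(\n            parents=True, exist_ok=True\n        )\n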
Info
Except the root directory and the QuPath project, the rest is automatically created based on the parameters provided in the different scripts. Here's the description of the structure and the requirements :
animalid0
should be a convenient animal identifier.AnimalID0
, can be anything but should correspond to one and only one animal.animalid0
should be lower case.animalid0_qupath
can be named as you wish in practice, but should be the QuPath project.animalid0_segmentation
should be called exactly like this -- replacing animalid0
with the actual animal ID. It will be created automatically with the exportPixelClassifierProbabilities.groovy
script.segtag
corresponds to the type of segmentation (cells, fibers...). It is specified in the exportPixelClassifierProbabilities
script. It could be anything, but to recognize if the objects are polygons (and should be counted per regions) or polylines (and the cumulated length should be measured), there are some hardcoded keywords in the segment_images.py
and pipelineImportExport.groovy
scripts :cells
, cell
, polygons
, polygon
synapto
, synaptophysin
, syngfp
, boutons
, points
fibers
, fiber
, axons
, axon
annotations
contains the atlas regions measurements as TSV files.detections
contains the objects atlas coordinates and measurements as CSV files (for punctal objects) or JSON (for polylines objects).geojson
contains objects stored as geojson files. They could be generated with the pixel classifier prediction map segmentation.probabilities
contains the prediction maps to be segmented by the segment_images.py
script.Tip
You can see an example minimal directory structure with only annotations stored in resources/multi
.
Tip
Remember that this is merely an example pipeline, you can shortcut it at any points, as long as you end up with TSV files following the requirements for cuisto
.
exportPixelClassifierProbabilities.groovy
script. You need to get a pixel classifier or create one.segment_images.py
script to generate the geojson files containing the objects of interest.pipelineImportExport.groovy
script on your QuPath project.import cuisto\n\n# Parameters\nwdir = \"/path/to/some_directory\"\nanimals = [\"AnimalID0\", \"AnimalID1\"]\nconfig_file = \"/path/to/your/config.toml\"\noutput_format = \"h5\" # to save the quantification values as hdf5 file\n\n# Processing\ncfg = cuisto.Config(config_file)\ndf_regions, dfs_distributions, df_coordinates = cuisto.process.process_animals(\n wdir, animals, cfg, out_fmt=output_format\n)\n\n# Display\ncuisto.display.plot_regions(df_regions, cfg)\ncuisto.display.plot_1D_distributions(dfs_distributions, cfg, df_coordinates=df_coordinates)\ncuisto.display.plot_2D_distributions(df_coordinates, cfg)\n
Tip
You can see a live example in this demo notebook.
"},{"location":"guide-prepare-qupath.html","title":"Prepare QuPath data","text":"cuisto
uses some QuPath classifications concepts, make sure to be familiar with them with the official documentation. Notably, we use the concept of primary classification and derived classification : an object classfied as First: second
is of classification First
and of derived classification second
.
cuisto
assumes a specific way of storing regions and objects information in the TSV files exported from QuPath. Note that only one primary classification is supported, but you can have any number of derived classifications.
Detections are the objects of interest. Their information must respect the following :
Atlas_X
, Atlas_Y
, Atlas_Z
. They correspond, respectively, to the anterio-posterior (rostro-caudal) axis, the inferio-superior (dorso-ventral) axis and the left-right (medio-lateral) axis.Primary: second
. Primary would be an object type (cells, fibers, ...), the second one would be a biological marker or a detection channel (fluorescence channel name), for instance : Cells: some marker
, or Fibers: EGFP
.Annotations correspond to the atlas regions. Their information must respect the following :
Hemisphere: acronym
(for ex. Left: PAG
).Primary classification: derived classification measurement name
. For instance : Cells: some marker Count
.Fibers: EGFP Length \u00b5m
.cuisto
","text":"While you're free to add any measurements as long as they follow the requirements, keep in mind that for atlas regions quantification, cuisto
will only compute, pool and average the following metrics :
It is then up to you to select which metrics among those to compute and display and name them, via the configuration file.
For punctal detections (eg. objects whose only the centroid is considered), only the atlas coordinates are used, to compute and display spatial distributions of objects across the brain (using their classifications to give each distributions different hues). For fibers-like objects, it requires to export the lines detections atlas coordinates as JSON files, with the exportFibersAtlasCoordinates.groovy
script (this is done automatically when using the pipeline).
The groovy script under scripts/qupath-utils/measurements/addRegionsCount.groovy
will add a properly formatted count of objects of selected classifications in all atlas regions. This is used for punctual objects (polygons or points), for example objects created in QuPath or with the segmentation script.
The groovy script under scripts/qupath-utils/measurements/addRegionsLength.groovy
will add the properly formatted cumulated lenghth in microns of fibers-like objects in all atlas regions. This is used for polylines objects, for example generated with the segmentation script.
Keeping in mind cuisto
limitations, you can add any measurements you'd like.
For example, you can run a pixel classifier in all annotations (eg. atlas regions). Using the Measure
button, it will add a measurement of the area covered by classified pixels. Then, you can use the script located under scripts/qupath-utils/measurements/renameMeasurements.groovy
to rename the generated measurements with a properly-formatted name. Finally, you can export regions measurements.
Since cuisto
will compute a \"density\", eg. the measurement divided by the region area, in this case, it will correspond to the fraction of surface occupied by classified pixels. This is showcased in the Examples.
Once you imported atlas regions registered with ABBA, detected objects in your images and added properly formatted measurements to detections and annotations, you can :
Measure > Export measurements
Output file
(specify in the file name if it is a detections or annotations file)Detections
or Annoations
in Export type
Export
Do this for both Detections and Annotations, you can then use those files with cuisto
(see the Examples).
The QuPath documentation is quite extensive, detailed, very well explained and contains full guides on how to create a QuPath project and how to find objects of interests. It is therefore a highly recommended read, nevertheless, you will find below some quick reminders.
"},{"location":"guide-qupath-objects.html#qupath-project","title":"QuPath project","text":"QuPath works with projects. It is basically a folder with a main project.qproj
file, which is a JSON file that contains all the data about your images except the images themselves. Algonside, there is a data
folder with an entry for each image, that stores the thumbnails, metadata about the image and detections and annotations but, again, not the image itself. The actual images can be stored anywhere (including a remote server), the QuPath project merely contains the information needed to fetch them and display them. QuPath will never modify your image data.
This design makes the QuPath project itself lightweight (should never exceed 500MB even with millions of detections), and portable : upon opening, if QuPath is not able to find the images where they should be, it will ask for their new locations.
Tip
It is recommended to create the QuPath project locally on your computer, to avoid any risk of conflicts if two people open it at the same time. Nevertheless, you should backup the project regularly on a remote server.
To create a new project, simply drag & drop an empty folder into QuPath window and accept to create a new empty project. Then, add images :
Add images
, then Choose files
on the bottom. Drag & drop does not really work as the images will not be sorted properly.Then, choose the following options :
Image server
Default (let QuPath decide)
Set image type
Most likely, fluorescence
Rotate image
No rotation (unless all your images should be rotated)
Optional args
Leave empty
Auto-generate pyramids
Uncheck
Import objects
Uncheck
Show image selector
Might be useful to check if the images are read correctly (mostly for CZI files).
"},{"location":"guide-qupath-objects.html#detect-objects","title":"Detect objects","text":""},{"location":"guide-qupath-objects.html#built-in-cell-detection","title":"Built-in cell detection","text":"QuPath has a built-in cell detection feature, available in Analyze > Cell detection
. You hava a full tutorial in the official documentation.
Briefly, this uses a watershed algorithm to find bright spots and can perform a cell expansion to estimate the full cell shape based on the detected nuclei. Therefore, this works best to segment nuclei but one can expect good performance for cells as well, depending on the imaging and staining conditions.
Tip
In scripts/qupath-utils/segmentation
, there is watershedDetectionFilters.groovy
which uses this feature from a script. It further allows you to filter out detected cells based on shape measurements as well as fluorescence itensity in several channels and cell compartments.
Another very powerful and versatile way to segment cells if through machine learning. Note the term \"machine\" and not \"deep\" as it relies on statistics theory from the 1980s. QuPath provides an user-friendly interface to that, similar to what ilastik provides.
The general idea is to train a model to classify every pixel as a signal or as background. You can find good resources on how to procede in the official documentation and some additionnal tips and tutorials on Michael Neslon's blog (here and here).
Specifically, you will manually annotate some pixels of objects of interest and background. Then, you will apply some image processing filters (gaussian blur, laplacian...) to reveal specific features in your images (shapes, textures...). Finally, the pixel classifier will fit a model on those pixel values, so that it will be able to predict if a pixel, given the values with the different filters you applied, belongs to an object of interest or to the background.
This is done in an intuitive GUI with live predictions to get an instant feedback on the effects of the filters and manual annotations.
"},{"location":"guide-qupath-objects.html#train-a-model","title":"Train a model","text":"First and foremost, you should use a QuPath project dedicated to the training of a pixel classifier, as it is the only way to be able to edit it later on.
Classify > Pixel classification > Train pixel classifier
, and turn on Live prediction
.Load training
.Advanced settings
, check Reweight samples
to help make sure a classification is not over-represented.Classifier
: typically, RTrees
or ANN_MLP
. This can be changed dynamically afterwards to see which works best for you.Resolution
: this is the pixel size used. This is a trade-off between accuracy and speed. If your objects are only composed of a few pixels, you'll need the full resolution, while for big objects, reducing the resolution will be faster.Features
: this is the core of the process -- where you choose the filters. In Edit
, you'll need to choose :Output
:Classification
: QuPath will directly classify the pixels. Use that to create objects directly from the pixel classifier within QuPath.Probability
: this will output an image where each pixel is its probability to belong to each of the classifications. This is useful to create objects externally.Show classification
once you begin to make annotations.Begin to annotate ! Use the Polyline annotation tool (V) to classify some pixels belonging to an object and some pixels belonging to the background across your images.
Tip
You can select the RTrees
Classifier, then Edit
: check the Calculate variable importance
checkbox. Then in the log (Ctrl+Shift+L), you can inspect the weight each features have. This can help discard some filters to keep only the ones most efficient to distinguish the objects of interest.
See in live the effect of your annotations on the classification using C and continue until you're satisfied.
Important
This is machine learning. The lesser annotations, the better, as this will make your model more general and adapt to new images. The goal is to find the minimal number of annotations to make it work.
Once you're done, give your classifier a name in the text box in the bottom and save it. It will be stored as a JSON file in the classifiers
folder of the QuPath project. This file can be imported in your other QuPath projects.
Once you imported your model JSON file (Classify > Pixel classification > Load pixel classifier
, three-dotted menu and Import from file
), you can create objects out of it, measure the surface occupied by classified pixels in each annotation or classify existing detections based on the prediction at their centroid.
In scripts/qupath-utils/segmentation
, there is a createDetectionsFromPixelClassifier.groovy
script to batch-process your project.
Alternatively, a Python script provided with cuisto
can be used to segment the probability map generated by the pixel classifier (the script is located in scripts/segmentation
).
You will first need to export those with the exportPixelClassifierProbabilities.groovy
script (located in scripts/qupath-utils
).
Then the segmentation script can :
Several parameters have to be specified by the user, see the segmentation script API reference. This script will generate GeoJson files that can be imported back to QuPath with the importGeojsonFiles.groovy
script.
QuPath being open-source and extensible, there are third-party extensions that implement popular deep learning segmentation algorithms directly in QuPath. They can be used to find objects of interest as detections in the QuPath project and thus integrate nicely with cuisto
to quantify them afterwards.
QuPath extension : https://github.com/qupath/qupath-extension-instanseg Original repository : https://github.com/instanseg/instanseg Reference papers : doi:10.48550/arXiv.2408.15954, doi:10.1101/2024.09.04.611150
"},{"location":"guide-qupath-objects.html#stardist","title":"Stardist","text":"QuPath extension : https://github.com/qupath/qupath-extension-stardist Original repository : https://github.com/stardist/stardist Reference paper : doi:10.48550/arXiv.1806.03535
There is a stardistDetectionFilter.groovy
script in scripts/qupath-utils/segmentation
to use it from a script which further allows you to filter out detected cells based on shape measurements as well as fluorescence itensity in several channels and cell compartments.
QuPath extension : https://github.com/BIOP/qupath-extension-cellpose Original repository : https://github.com/MouseLand/cellpose Reference papers : doi:10.1038/s41592-020-01018-x, doi:10.1038/s41592-022-01663-4, doi:10.1101/2024.02.10.579780
There is a cellposeDetectionFilter.groovy
script in scripts/qupath-utils/segmentation
to use it from a script which further allows you to filter out detected cells based on shape measurements as well as fluorescence itensity in several channels and cell compartments.
QuPath extension : https://github.com/ksugar/qupath-extension-sam Original repositories : samapi, SAM Reference papers : doi:10.1101/2023.06.13.544786, doi:10.48550/arXiv.2304.02643
This is more an interactive annotation tool than a fully automatic segmentation algorithm.
"},{"location":"guide-register-abba.html","title":"Registration with ABBA","text":"The ABBA documentation is quite extensive and contains guided tutorials and a video tutorial. You should therefore check it out ! Nevertheless, you will find below some quick reminders.
"},{"location":"guide-register-abba.html#import-a-qupath-project","title":"Import a QuPath project","text":"Always use ABBA with a QuPath project, if you import the images directly it will not be possible to export the results back to QuPath. In the toolbar, head to Import > Import QuPath Project
.
Warning
ABBA is not the most stable software, it is highly recommended to save in a different file each time you do anything.
"},{"location":"guide-register-abba.html#navigation","title":"Navigation","text":""},{"location":"guide-register-abba.html#interface","title":"Interface","text":"In the right panel, there is everything related to the images, both yours and the atlas.
In the Atlas Display
section, you can turn on and off different channels (the first is the reference image, the last is the regions outlines). The Displayed slicing [atlas steps]
slider can increase or decrease the number of displayed 2D slices extracted from the 3D volume. It is comfortable to set to to the same spacing as your slices. Remember it is in \"altas steps\", so for an atlas imaged at 10\u00b5m, a 120\u00b5m spacing corresponds to 12 atlas steps.
The Slices Display
section lists all your slices. Ctrl+A to select all, and click on the Vis.
header to make them visible. Then, you can turn on and off each channels (generally the NISSL channel and the ChAT channel will be used) by clicking on the corresponding header. Finally, set the display limits clicking on the empty header containing the colors.
Right Button in the main view to Change overlap mode
twice to get the slices right under the atlas slices.
Tip
Every action in ABBA are stored and are cancellable with Right Button+Z, except the Interactive transform.
"},{"location":"guide-register-abba.html#find-position-and-angle","title":"Find position and angle","text":"This is the hardest task. You need to drag the slices along the rostro-caudal axis and modify the virtual slicing angle (X Rotation [deg]
and Y Rotation [deg]
sliders at the bottom of the right panel) until you match the brain structures observed in both your images and the atlas.
Tip
With a high number of slices, most likely, it will be impossible to find a position and slicing angle that works for all your slices. In that case, you should procede in batch, eg. sub-stack of images with a unique position and slicing angle that works for all images in the sub-stack. Then, remove the remaining slices (select them, Right Button > Remove Selected Slices
), but do not remove them from the QuPath project.
Procede as usual, including saving (note the slices range it corresponds to) and exporting the registration back to QuPath. Then, reimport the project in a fresh ABBA instance, remove the slices that were already registered and redo the whole process with the next sub-stack and so on.
Once you found the correct position and slicing angle, it must not change anymore, otherwise the registration operations you perform will not make any sense anymore.
"},{"location":"guide-register-abba.html#in-plane-registration","title":"In-plane registration","text":"The next step is to deform your slices to match the corresponding atlas image, extracted from the 3D volume given the position and virtual slicing angle defined at the previous step.
Info
ABBA makes the choice to deform your slices to the atlas, but the transformations are invertible. This means that you will still be able to work on your raw data and deform the altas onto it instead.
In image processing, there are two kinds of deformation one can apply on an image :
Both can be applied manually or automatically (if the imaging quality allows it). You have different tools to achieve this, all of which can be combined in any order, except the Interactive transform tool (coarse, linear manual deformation).
Change the overlap mode (Right Button) to overlay the slice onto the atlas regions borders. Select the slice you want to align.
"},{"location":"guide-register-abba.html#coarse-linear-manual-deformation","title":"Coarse, linear manual deformation","text":"While not mandatory, if this tool shall be used, it must be before any operation as it is not cancellable. Head to Register > Affine > Interactive transform
. This will open a box where you can rotate, translate and resize the image to make a first, coarse alignment.
Close the box. Again, this is not cancellable. Afterwards, you're free to apply any numbers of transformations in any order.
"},{"location":"guide-register-abba.html#automatic-registration","title":"Automatic registration","text":"This uses the elastix toolbox to compute the transformations needed to best match two images. It is available in both affine and spline mode, in the Register > Affine
and Register > Spline
menus respectively.
In both cases, it will open a dialog where you need to choose :
For the Spline mode, there an additional parameter :
This uses BigWarp to manually deform the images with the mouse. It can be done from scratch (eg. you place the points yourself) or from a previous registration (either a previous BigWarp session or elastix in Spline mode).
"},{"location":"guide-register-abba.html#from-scratch","title":"From scratch","text":"Register > Spline > BigWarp registration
to launch the tool. Choose the atlas that allows you to best see the brain structures (usually the regions outlines channels, the last one), and the reference fluorescence channel.
It will open two viewers, called \"BigWarp moving image\" and \"BigWarp fixed image\". Briefly, they correspond to the two spaces you're working in, the \"Atlas space\" and the \"Slice space\".
Tip
Do not panick yet, while the explanations might be confusing (at least they were to me), in practice, it is easy, intuitive and can even be fun (sometimes, at small dose).
To browse the viewer, use Right Button + drag (Left Button is used to rotate the viewer), Middle Button zooms in and out.
The idea is to place points, called landmarks, that always go in pairs : one in the moving image and one where it corresponds to in the fixed image (or vice-versa). In practice, we will only work in the BigWarp fixed image viewer to place landmarks in both space in one click, then drag it to the corresponding location, with a live feedback of the transformation needed to go from one to another.
To do so :
Press Space to switch to the \"Landmark mode\".
Warning
In \"Landmark mode\", Right Button can't be used to browse the view anymore. To do so, turn off the \"Landmark mode\" hitting Space again.
Use Ctrl+Left Button to place a landmark.
Info
At least 4 landmarks are needed before activating the live-transform view.
When there are at least 4 landmarks, hit T to activate the \"Transformed\" view. Transformed
will be written at the bottom.
OK
.Important remarks and tips
Head to Register > Edit last Registration
to work on a previous registration.
If the previous registration was done with elastix (Spline) or BigWarp, it will launch the BigWarp interface exactly like above, but with landmarks already placed, either on a grid (elastix) or the one you manually placed (BigWarp).
Tip
It will ask which channels to use, you can modify the channel for your slices to work on two channels successively. For instance, one could make a first registration using the NISSL staining, then refine the motoneurons with the ChAT staining, if available.
"},{"location":"guide-register-abba.html#abba-state-file","title":"ABBA state file","text":"ABBA can save the state you're in, from the File > Save State
menu. It will be saved as a .abba
file, which is actually a zip archive containing a bunch of JSON, listing every actions you made and in which order, meaning you will stil be able to cancel actions after quitting ABBA.
To load a state, quit ABBA, launch it again, then choose File > Load State
and select the .abba
file to carry on with the registration.
Save, save, save !
Those state files are cheap, eg. they are lightweight (less than 200KB). You should save the state each time you finish a slice, and you can keep all your files, without overwritting the previous ones, appending a number to its file name. This will allow to roll back to the previous slice in the event of any problem you might face.
"},{"location":"guide-register-abba.html#export-registration-back-to-qupath","title":"Export registration back to QuPath","text":""},{"location":"guide-register-abba.html#export-the-registration-from-abba","title":"Export the registration from ABBA","text":"Once you are satisfied with your registration, select the registered slices and head to Export > QuPath > Export Registrations To QuPath Project
. Check the box to make sure to get the latest registered regions.
It will export several files in the QuPath projects, including the transformed atlas regions ready to be imported in QuPath and the transformations parameters to be able to convert coordinates from the extension.
"},{"location":"guide-register-abba.html#import-the-registration-in-qupath","title":"Import the registration in QuPath","text":"Make sure you installed the ABBA extension in QuPath.
From your project with an image open, the basic usage is to head to Extensions > ABBA > Load Atlas Annotations into Open Image
. Choose to Split Left and Right Regions
to make the two hemispheres independent, and choose the \"acronym\" to name the regions. The registered regions should be imported as Annotations in the image.
Tip
With ABBA in regular Fiji using the CCFv3 Allen mouse brain atlas, the left and right regions are flipped, because ABBA considers the slices as backward facing. The importAbba.groovy
script located in scripts/qupath-utils-atlas
allows you to flip left/right regions names. This is OK because the Allen brain is symmetrical by construction.
For more complex use, check the Groovy scripts in scripts/qupath-utils/atlas
. ABBA registration is used throughout the guides, to either work with brain regions (and count objects for instance) or to get the detections' coordinates in the atlas space.
While cuisto
does not have a reference paper as of now, you can reference the GitHub repository.
Please make sure to cite all the softwares used in your research. Citations are usually the only metric used by funding agencies, so citing properly the tools used in your research ensures the continuation of those projects.
There are three configuration files : altas_blacklist
, atlas_fusion
and a modality-specific file, that we'll call config
in this document. The former two are related to the atlas you're using, the latter is what is used by cuisto
to know what and how to compute and display things. There is a fourth, optional, file, used to provide some information on a specific experiment, info
.
The configuration files are in the TOML file format, that are basically text files formatted in a way that is easy to parse in Python. See here for a basic explanation of the syntax.
Most lines of each template file are commented to explain what each parameter do.
"},{"location":"main-configuration-files.html#atlas_blacklisttoml","title":"atlas_blacklist.toml","text":"Click to see an example file atlas_blacklist.toml# TOML file to list Allen brain regions to ignore during analysis.\n# \n# It is used to blacklist regions and all descendants regions (\"WITH_CHILD\").\n# Objects belonging to those regions and their descendants will be discarded.\n# And you can specify an exact region where to remove objects (\"EXACT\"),\n# descendants won't be affected.\n# Use it to remove noise in CBX, ventricual systems and fiber tracts.\n# Regions are referenced by their exact acronym.\n#\n# Syntax :\n# [WITH_CHILDS]\n# members = [\"CBX\", \"fiber tracts\", \"VS\"]\n#\n# [EXACT]\n# members = [\"CB\"]\n\n\n[WITH_CHILDS]\nmembers = [\"CBX\", \"fiber tracts\", \"VS\"]\n\n[EXACT]\nmembers = [\"CB\"]\n
This file is used to filter out specified regions and objects belonging to them.
members
keys will be ignored. Objects whose parents are in here will be ignored as well.[WITH_CHILDS]
section, regions and objects belonging to those regions and all descending regions (child regions, as per the altas hierarchy) will be removed.[EXACT]
section, only regions and objects belonging to those exact regions are removed. Descendants regions are not taken into account.# TOML file to determine which brain regions should be merged together.\n# Regions are referenced by their exact acronym.\n# The syntax should be the following :\n# \n# [MY]\n# name = \"Medulla\" # new or existing full name\n# acronym = \"MY\" # new or existing acronym\n# members = [\"MY-mot\", \"MY-sat\"] # existing Allen Brain acronyms that should belong to the new region\n#\n# Then, regions labelled \"MY-mot\" and \"MY-sat\" will be labelled \"MY\" and will join regions already labelled \"MY\".\n# What's in [] does not matter but must be unique and is used to group.\n# The new \"name\" and \"acronym\" can be existing Allen Brain regions or a new (meaningful) one.\n# Note that it is case sensitive.\n\n[PHY]\nname = \"Perihypoglossal nuclei\"\nacronym = \"PHY\"\nmembers = [\"NR\", \"PRP\"]\n\n[NTS]\nname = \"Nucleus of the solitary tract\"\nacronym = \"NTS\"\nmembers = [\"ts\", \"NTSce\", \"NTSco\", \"NTSge\", \"NTSl\", \"NTSm\"]\n\n[AMB]\nname = \"Nucleus ambiguus\"\nacronym = \"AMB\"\nmembers = [\"AMBd\", \"AMBv\"]\n\n[MY]\nname = \"Medulla undertermined\"\nacronym = \"MYu\"\nmembers = [\"MY-mot\", \"MY-sat\"]\n\n[IRN]\nname = \"Intermediate reticular nucleus\"\nacronym = \"IRN\"\nmembers = [\"IRN\", \"LIN\"]\n
This file is used to group regions together, to customize the atlas' hierarchy. It is particularly useful to group smalls brain regions that are impossible to register precisely. Keys name
, acronym
and members
should belong to a [section]
.
[section]
is just for organizing, the name does not matter but should be unique.name
should be a human-readable name for your new region.acronym
is how the region will be refered to. It can be a new acronym, or an existing one.members
is a list of acronyms of atlas regions that should be part of the new one.########################################################################################\n# Configuration file for cuisto package\n# -----------------------------------------\n# This is a TOML file. It maps a key to a value : `key = value`.\n# Each key must exist and be filled. The keys' names can't be modified, except:\n# - entries in the [channels.names] section and its corresponding [channels.colors] section,\n# - entries in the [regions.metrics] section. \n#\n# It is strongly advised to NOT modify this template but rather copy it and modify the copy.\n# Useful resources :\n# - the TOML specification : https://toml.io/en/\n# - matplotlib colors : https://matplotlib.org/stable/gallery/color/color_demo.html\n#\n# Configuration file part of the python cuisto package.\n# version : 2.1\n########################################################################################\n\nobject_type = \"Cells\" # name of QuPath base classification (eg. without the \": subclass\" part)\nsegmentation_tag = \"cells\" # type of segmentation, matches directory name, used only in the full pipeline\n\n[atlas] # information related to the atlas used\nname = \"allen_mouse_10um\" # brainglobe-atlasapi atlas name\ntype = \"brain\" # brain or cord (eg. registration done in ABBA or abba_python)\nmidline = 5700 # midline Z coordinates (left/right limit) in microns\noutline_structures = [\"root\", \"CB\", \"MY\", \"P\"] # structures to show an outline of in heatmaps\n\n[channels] # information related to imaging channels\n[channels.names] # must contain all classifications derived from \"object_type\"\n\"marker+\" = \"Positive\" # classification name = name to display\n\"marker-\" = \"Negative\"\n[channels.colors] # must have same keys as names' keys\n\"marker+\" = \"#96c896\" # classification name = matplotlib color (either #hex, color name or RGB list)\n\"marker-\" = \"#688ba6\"\n\n[hemispheres] # information related to hemispheres\n[hemispheres.names]\nLeft = \"Left\" # Left = name to display\nRight = \"Right\" # Right = name to display\n[hemispheres.colors] # must have same keys as names' keys\nLeft = \"#ff516e\" # Left = matplotlib color (either #hex, color name or RGB list)\nRight = \"#960010\" # Right = matplotlib color\n\n[distributions] # spatial distributions parameters\nstereo = true # use stereotaxic coordinates (Paxinos, only for brain)\nap_lim = [-8.0, 0.0] # bins limits for anterio-posterior\nap_nbins = 75 # number of bins for anterio-posterior\ndv_lim = [-1.0, 7.0] # bins limits for dorso-ventral\ndv_nbins = 50 # number of bins for dorso-ventral\nml_lim = [-5.0, 5.0] # bins limits for medio-lateral\nml_nbins = 50 # number of bins for medio-lateral\nhue = \"channel\" # color curves with this parameter, must be \"hemisphere\" or \"channel\"\nhue_filter = \"Left\" # use only a subset of data. If hue=hemisphere : channel name, list of such or \"all\". If hue=channel : hemisphere name or \"both\".\ncommon_norm = true # use a global normalization for each hue (eg. the sum of areas under all curves is 1)\n[distributions.display]\nshow_injection = false # add a patch showing the extent of injection sites. 
Uses corresponding channel colors\ncmap = \"OrRd\" # matplotlib color map for heatmaps\ncmap_nbins = 50 # number of bins for heatmaps\ncmap_lim = [1, 50] # color limits for heatmaps\n\n[regions] # distributions per regions parameters\nbase_measurement = \"Count\" # the name of the measurement in QuPath to derive others from\nhue = \"channel\" # color bars with this parameter, must be \"hemisphere\" or \"channel\"\nhue_filter = \"Left\" # use only a subset of data. If hue=hemisphere : channel name, list of such or \"all\". If hue=channel : hemisphere name or \"both\".\nhue_mirror = false # plot two hue_filter in mirror instead of discarding the other\nnormalize_starter_cells = false # normalize non-relative metrics by the number of starter cells\n[regions.metrics] # names of metrics. Do not change the keys !\n\"density \u00b5m^-2\" = \"density \u00b5m^-2\"\n\"density mm^-2\" = \"density mm^-2\"\n\"coverage index\" = \"coverage index\"\n\"relative measurement\" = \"relative count\"\n\"relative density\" = \"relative density\"\n[regions.display]\nnregions = 18 # number of regions to display (sorted by max.)\norientation = \"h\" # orientation of the bars (\"h\" or \"v\")\norder = \"max\" # order the regions by \"ontology\" or by \"max\". Set to \"max\" to provide a custom order\ndodge = true # enforce the bar not being stacked\nlog_scale = false # use log. scale for metrics\n[regions.display.metrics] # name of metrics to display\n\"count\" = \"count\" # real_name = display_name, with real_name the \"values\" in [regions.metrics]\n\"density mm^-2\" = \"density (mm^-2)\"\n\n[files] # full path to information TOML files\nblacklist = \"../../atlas/atlas_blacklist.toml\"\nfusion = \"../../atlas/atlas_fusion.toml\"\noutlines = \"/data/atlases/allen_mouse_10um_outlines.h5\"\ninfos = \"../../configs/infos_template.toml\"\n
This file is used to configure cuisto
behavior. It specifies what to compute, how, and display parameters such as colors associated to each classifications, hemisphere names, distributions bins limits...
Warning
When editing your config.toml file, you're allowed to modify the keys only in the [channels]
section.
object_type
: name of QuPath base classification (eg. without the \": subclass\" part) segmentation_tag
: type of segmentation, matches directory name, used only in the full pipeline
atlas Information related to the atlas used
name
: brainglobe-atlasapi atlas name type
: \"brain\" or \"cord\" (eg. registration done in ABBA or abba_python). This will determine whether to flip Left/Right when determining detections hemisphere based on their coordinates. Also adapts the axes in the 2D heatmaps. midline
: midline Z coordinates (left/right limit) in microns to determine detections hemisphere based on their coordinates. outline_structures
: structures to show an outline of in heatmaps
channels Information related to imaging channels
names Must contain all classifications derived from \"object_type\" you want to process. In the form subclassification name = name to display on the plots
\"marker+\"
: classification name = name to display \"marker-\"
: add any number of sub-classification
colors Must have same keys as \"names\" keys, in the form subclassification name = color
, with color specified as a matplotlib named color, an RGB list or an hex code.
\"marker+\"
: classification name = matplotlib color \"marker-\"
: must have the same entries as \"names\".
hemispheres Information related to hemispheres, same structure as channels
names
Left
: Left = name to display Right
: Right = name to display
colors Must have same keys as names' keys
Left
: ff516e\" # Left = matplotlib color (either #hex, color name or RGB list) Right
: 960010\" # Right = matplotlib color
distributions Spatial distributions parameters
stereo
: use stereotaxic coordinates (as in Paxinos, only for mouse brain CCFv3) ap_lim
: bins limits for anterio-posterior in mm ap_nbins
: number of bins for anterio-posterior dv_lim
: bins limits for dorso-ventral in mm dv_nbins
: number of bins for dorso-ventral ml_lim
: bins limits for medio-lateral in mm ml_nbins
: number of bins for medio-lateral hue
: color curves with this parameter, must be \"hemisphere\" or \"channel\" hue_filter
: use only a subset of data
common_norm
: use a global normalization (eg. the sum of areas under all curves is 1). Otherwise, normalize each hue individually
display Display parameters
show_injection
: add a patch showing the extent of injection sites. Uses corresponding channel colors. Requires the information TOML configuration file set up cmap
: matplotlib color map for 2D heatmaps cmap_nbins
: number of bins for 2D heatmaps cmap_lim
: color limits for 2D heatmaps
regions Distributions per regions parameters
base_measurement
: the name of the measurement in QuPath to derive others from. Usually \"Count\" or \"Length \u00b5m\" hue
: color bars with this parameter, must be \"hemisphere\" or \"channel\" hue_filter
: use only a subset of data
hue_mirror
: plot two hue_filter in mirror instead of discarding the others. For example, if hue=channel and hue_filter=\"both\", plots the two hemisphere in mirror. normalize_starter_cells
: normalize non-relative metrics by the number of starter cells
metrics Names of metrics. The keys are used internally in cuisto as is so should NOT be modified. The values will only chang etheir names in the ouput file
\"density \u00b5m^-2\"
: relevant name \"density mm^-2\"
: relevant name \"coverage index\"
: relevant name \"relative measurement\"
: relevant name \"relative density\"
: relevant name
display
nregions
: number of regions to display (sorted by max.) orientation
: orientation of the bars (\"h\" or \"v\") order
: order the regions by \"ontology\" or by \"max\". Set to \"max\" to provide a custom order dodge
: enforce the bar not being stacked log_scale
: use log. scale for metrics
metrics name of metrics to display
\"count\"
: real_name = display_name, with real_name the \"values\" in [regions.metrics] \"density mm^-2\"
files Full path to information TOML files and atlas outlines for 2D heatmaps.
blacklist
fusion
outlines
infos
# TOML file to specify experimental settings of each animals.\n# Syntax should be :\n# [animalid0] # animal ID\n# slice_thickness = 30 # slice thickness in microns\n# slice_spacing = 60 # spacing between two slices in microns\n# [animalid0.marker-name] # [{Animal id}.{segmented channel name}]\n# starter_cells = 190 # number of starter cells\n# injection_site = [x, y, z] # approx. injection site in CCFv3 coordinates\n#\n# --------------------------------------------------------------------------\n[animalid0]\nslice_thickness = 30\nslice_spacing = 60\n[animalid0.\"marker+\"]\nstarter_cells = 150\ninjection_site = [ 10.8937328, 6.18522070, 6.841855301 ]\n[animalid0.\"marker-\"]\nstarter_cells = 175\ninjection_site = [ 10.7498512, 6.21545461, 6.815487203 ]\n# --------------------------------------------------------------------------\n[animalid1-SC]\nslice_thickness = 30\nslice_spacing = 120\n[animalid1-SC.EGFP]\nstarter_cells = 250\ninjection_site = [ 10.9468211, 6.3479642, 6.0061113 ]\n[animalid1-SC.DsRed]\nstarter_cells = 275\ninjection_site = [ 10.9154874, 6.2954872, 8.1587125 ]\n# --------------------------------------------------------------------------\n
This file is used to specify injection sites for each animal and each channel, so they can be displayed on the distributions.
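As a sketch of how such a file maps to Python objects (assuming Python ≥ 3.11 for the built-in tomllib parser; cuisto does its own parsing internally), one could retrieve the number of starter cells like this :

```python
import tomllib

# hypothetical path to an information file like the one above
with open("infos.toml", "rb") as fid:
    infos = tomllib.load(fid)

# number of starter cells for animal "animalid0", channel "marker+"
print(infos["animalid0"]["marker+"]["starter_cells"])  # 150
print(infos["animalid0"]["slice_spacing"])  # 60
```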
"},{"location":"main-getting-help.html","title":"Getting help","text":"For help in QuPath, ABBA, Fiji or any image processing-related questions, your one stop is the image.sc forum. There, you can search with specific tags (#qupath
, #abba
, ...). You can also ask questions or even answer some by creating an account!
For help with cuisto
in particular, you can open an issue on GitHub (which requires an account as well), or send an email to me or Antoine Lesage.
```
conda create -c conda-forge -n cuisto-env python=3.12
conda activate cuisto-env
```
Then, from the cuisto-xxx folder :
```
pip install .
```
If you want to build the doc :
```
pip install .[doc]
```
Tip
If all goes well, you shouldn't need any admin rights to install the various pieces of software used before cuisto
.
Important
Remember to cite all the software you use! See Citing.
"},{"location":"main-getting-started.html#qupath","title":"QuPath","text":"QuPath is an \"open source software for bioimage analysis\". You can install it from the official website : https://qupath.github.io/. The documentation is quite clear and comprehensive : https://qupath.readthedocs.io/en/stable/index.html.
This is where you'll create QuPath projects, in which you'll be able to browse your images, annotate them, import registered brain regions and find objects of interest (via automatic segmentation, thresholding, pixel classification, ...). Then, those annotations and detections can be exported to be processed by cuisto
.
This is the tool you'll use to register 2D histological sections to 3D atlases. See the dedicated page.
"},{"location":"main-getting-started.html#python-virtual-environment-manager-conda","title":"Python virtual environment manager (conda
)","text":"The cuisto
package is written in Python. It depends on scientific libraries (such as NumPy, pandas and many more). Those libraries need to be installed in versions that are compatible with each other and with cuisto
. To make sure those versions do not conflict with other Python tools you might be using (deeplabcut
, abba_python
, ...), we will install cuisto
and its dependencies in a dedicated virtual environment.
conda
is a program that takes care of this. It comes with a "base" environment, from which we will create and manage other, project-specific environments. It is also used to download and install Python in each of those environments, as well as third-party libraries. conda
in itself is free and open-source and can be used freely by anyone.
It is included with the Anaconda distribution, whose terms of service state that it is free to use only for individuals, companies with fewer than 200 employees, and universities (but not national research labs); otherwise, you need to pay for a licence. conda
, while being free, is by default configured to use the \"defaults\" channel to fetch the packages (including Python itself), a repository operated by Anaconda, which is, itself, subject to the Anaconda terms of service.
In contrast, conda-forge is a community-run repository that contains more numerous and more up-to-date packages. This is free to use for anyone. The idea is to use conda
directly (instead of the Anaconda graphical interface) and download packages from conda-forge (instead of the Anaconda-run defaults). To try to decipher this mess, Anaconda provides this figure :
Furthermore, the "base" conda environment installed with the Anaconda distribution is bloated and already contains tons of libraries, and tends to self-destruct at some point (eg. it becomes unable to resolve the inter-dependencies), leaving you unable to install new libraries or create new environments.
This is why it is highly recommended to install Miniconda instead, a minimal installer for conda, and configure it to use the free, community-run channel conda-forge, or, even better, to use Miniforge which is basically the same but pre-configured to use conda-forge. The only downside is that you will not get the Anaconda graphical user interface and you'll need to use the terminal instead, but worry not! We've got you covered.
conda init\n
This will activate conda and its base environment whenever you open a new PowerShell window. Now, when opening a new PowerShell (or terminal), you should see a prompt like this : (base) PS C:\\Users\\myname>\n
Tip
If Anaconda is already installed and you don't have the rights to uninstall it, you'll have to use it instead. You can launch the \"Anaconda Prompt (PowerShell)\", run conda init
. Open a regular PowerShell window and run conda config --add channels conda-forge
, so that subsequent installations and environment creations will fetch the required dependencies from conda-forge.
This section explains how to actually install the cuisto
package. The following commands should be run from a terminal (PowerShell). Remember that the -c conda-forge
bits are not necessary if you installed conda with the Miniforge distribution.
1. Create a virtual environment with Python 3.12 :
```
conda create -c conda-forge -n cuisto-env python=3.12
```
2. Download the latest cuisto Source code .zip package, from the Releases page, and extract it.
3. Install it in the cuisto-env
 environment we just created. First, you need to activate the cuisto-env
 environment :
```
conda activate cuisto-env
```
Now, the prompt should look like this :
```
(cuisto-env) PS C:\Users\myname>
```
This means that Python packages will now be installed in the cuisto-env environment and won't conflict with other toolboxes you might be using. Then, we use pip to install cuisto. pip was installed with Python, and will scan the cuisto folder, specifically the "pyproject.toml" file that lists all the required dependencies. To do so, you can either :
- specify the full path to the folder :
```
pip install /path/to/cuisto
```
- or move to the folder first :
```
cd /path/to/cuisto
```
Then install the package, "." denotes "here" :
```
pip install .
```
- or, from the file explorer, in the cuisto
 folder, use Shift+Right Button to "Open PowerShell window here" and run :
```
pip install .
```
cuisto
is now installed inside the cuisto-env
environment and will be available in Python from that environment !
Tip
You will need to perform step 3. each time you want to update the package.
If you already have registered data and cells in QuPath, you can export Annotations and Detections as TSV files and head to the Example section.
"},{"location":"main-using-notebooks.html","title":"Using notebooks","text":"A Jupyter notebook is a way to use Python in an interactive manner. It uses cells that contain Python code, and that are to be executed to immediately see the output, including figures.
You can see some rendered notebooks in the examples here, but you can also download them (downward arrow button on the top right corner of each notebook) and run them locally with your own data.
To do so, you can either use an integrated development environment (basically a supercharged text editor) that supports Jupyter notebooks, or directly the Jupyter web interface.
IDE : You can use for instance Visual Studio Code, also known as vscode.
Jupyter web interface :
1. Open a terminal in your "notebooks" folder (cd Documents\notebooks or, in the file explorer in your "notebooks" folder, Shift+Right Button to "Open PowerShell window here")
2. Activate the environment :
```
conda activate cuisto-env
```
3. Launch the Jupyter web interface :
```
jupyter lab
```
This should open a web page where you can open the ipynb files.
With cuisto
, it is possible to plot 2D heatmaps on brain contours.
All the detections are projected in a single plane, thus it is up to you to select a relevant data range. It is primarily intended to give a quick, qualitative overview of the spreading of your data.
To do so, it requires the brain regions outlines, stored in an hdf5 file. This can be generated with brainglobe-atlasapi. The generate_atlas_outlines.py script located in scripts/atlas will show you how to make such a file that the cuisto.display module can use.
Alternatively it is possible to directly plot density maps without cuisto
, using brainglobe-heatmap
. An example is shown here.
The representation of an image in a computer is basically a table where each element represents the pixel value (see more here). It can be n-dimensional, where the typical dimensions would be \\((x, y, z)\\), time and the fluorescence channels.
In large images, such as histological slices that are more than 10000\(\times\)10000 pixels, a strategy called tiling is used to optimize access to specific regions of the image. Storing the whole image as a single block in a file would imply loading the whole thing in memory (RAM) at once, even though one would only need to access a given rectangular region at a given zoom. Instead, the image is stored as tiles, small squares (512--2048 pixels) that pave the whole image and are used to reconstruct the original image. Therefore, when zooming in, only the relevant tiles are loaded and displayed, allowing for smooth navigation in large images. This process is done seamlessly by software like QuPath and BigDataViewer (the Fiji plugin ABBA is based on) when loading tiled images. This is also leveraged for image processing in QuPath, which will work on tiles instead of the whole image so as not to saturate your computer's RAM.
Most images are already tiled, including Zeiss CZI images. Note that those tiles do not necessarily correspond to the actual, real-world tiles the microscope acquired to image the whole slide.
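To make the idea concrete, here is a small illustration (not cuisto or QuPath code) of why tiling enables smooth navigation: displaying a region only requires loading the few tiles that intersect it.

```python
import math

def tiles_for_region(x0: int, y0: int, width: int, height: int, tile_size: int = 512):
    """Return (column, row) indices of the tiles covering a rectangular region."""
    c0, r0 = x0 // tile_size, y0 // tile_size
    c1 = math.ceil((x0 + width) / tile_size)
    r1 = math.ceil((y0 + height) / tile_size)
    return [(c, r) for r in range(r0, r1) for c in range(c0, c1)]

# a 20000x20000 pixels image is paved by 40x40 = 1600 tiles of 512 pixels,
# but displaying a 1000x1000 viewport only requires 9 of them :
print(len(tiles_for_region(4300, 7800, 1000, 1000)))  # 9
```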
"},{"location":"tips-formats.html#pyramids","title":"Pyramids","text":"In the same spirit as tiles, it would be a waste to have to load the entire image (and all the tiles) at once when viewing the image at max zoom-out, as your monitor nor your eyes would handle it. Instead, smaller, rescaled versions of the original image are stored alongside it, and depending on the zoom you are using, the sub-resolution version is displayed. Again, this is done seamlessly by QuPath and ABBA, allowing you to quickly switch from an image to another, without having to load the GB-sized image. Also, for image processing that does not require the original pixel size, QuPath can also leverage pyramids to go faster.
Usually, upon opening a CZI file in ZEN, a pop-up suggests generating pyramids. It is a very good idea to say yes, wait a bit and save the file so that the pyramidal levels are saved within the file.
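As an illustration of what pyramidal levels represent, this sketch lists the sizes of successive sub-resolution versions, assuming a downsampling factor of 2 between levels (a common choice) :

```python
# sizes of pyramidal levels for a hypothetical 20000x20000 pixels image
width = height = 20000
level = 0
while min(width, height) >= 512:  # stop around the typical tile size
    print(f"level {level}: {width} x {height}")
    width, height = width // 2, height // 2
    level += 1
# level 0: 20000 x 20000, level 1: 10000 x 10000, ..., level 5: 625 x 625
```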
"},{"location":"tips-formats.html#metadata","title":"Metadata","text":"Metadata, while often overlooked, are of paramount importance in microscopy data. It allows both softwares and users to interpret the raw data of images, eg. the values of each pixels. Most image file formats support this, including the microcope manufacturer file formats. Metadata may include :
Pixel size is the parameter that is absolutely necessary. Channel names and colors are more of a quality-of-life feature, to make sure not to mix up your different fluorescence channels. CZI files or exported OME-TIFF files include this out of the box, so you don't really need to pay attention.
"},{"location":"tips-formats.html#bio-formats","title":"Bio-formats","text":"Bio-formats is an initiative of the Open Microscopy Environment (OME) consortium, aiming at being able to read proprietary microscopy image data and metadata. It is used in QuPath, Fiji and ABBA.
This page summarizes the level of support of numerous file formats. You can see that Zeiss CZI files and Leica LIF are quite well supported, and should therefore work out of the box in QuPath.
"},{"location":"tips-formats.html#zeiss-czi-files","title":"Zeiss CZI files","text":"QuPath and ABBA supports any Bio-formats supported, tiled, pyramidal images.
If you're in luck, adding the pyramidal CZI file to your QuPath project will just work. If it doesn't, you'll notice immediately : the tiles will be shuffled and you'll see only a part of the image instead of the whole one. Unfortunately I was not able to determine why this happens and did not find a way to even predict if a file will or will not work.
In the event you experience this bug, you'll need to export the CZI files to OME-TIFF files from ZEN, then generate tiled pyramidal images with the pyramid-creator
package that you can find here.
Markdown is a markup language to create formatted text. It is basically a simple text file that could be opened with any text editor software (Notepad and the like), but features specific tags to format the text with heading levels, typesetting (bold, italic), links, lists... This very page is actually written in markdown, and the engine that builds it renders the text in a nicely formatted manner.
If you open a .md file with vscode for example, you'll get a magnifying glass on the top right corner to switch to the rendered version of the file.
"},{"location":"tips-formats.html#toml-toml-files","title":"TOML (.toml) files","text":"TOML, or Tom's Obvious Minimal Language, is a configuration file format (similar to YAML). Again, it is basically a simple text file that can be opened with any text editor and is human-readable, but also computer-readable. This means that it is easy for most software and programming language to parse the file to associate a variable (or \"key\") to a value, thus making it a good file format for configuration. It is used in cuisto
(see The configuration files page).
The syntax looks like this :
```toml
# a comment, ignored by the computer
key1 = 10  # the key "key1" is mapped to the number 10
key2 = "something"  # "key2" is mapped to the string "something"
key3 = ["something else", 1.10, -25]  # "key3" is mapped to a list with 3 elements
[section]  # we can declare sections
key1 = 5  # this is not "key1", it actually is section.key1
[section.example]  # we can have nested sections
key1 = true  # this is section.example.key1, mapped to the boolean True
```
You can check the full specification of this language here.
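For instance, in Python (≥ 3.11), the built-in tomllib module parses such a file into nested dictionaries :

```python
import tomllib

# the snippet above, trimmed to the nested keys
text = """
key1 = 10
[section]
key1 = 5
[section.example]
key1 = true
"""
data = tomllib.loads(text)
print(data["key1"])                        # 10
print(data["section"]["key1"])             # 5
print(data["section"]["example"]["key1"])  # True
```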
"},{"location":"tips-formats.html#csv-csv-tsv-files","title":"CSV (.csv, .tsv) files","text":"CSV (or TSV) stands for Comma-Separated Values (or Tab-Separated Values) and is, once again, a simple text file formatted in a way that allows LibreOffice Calc (or Excel) to open them as a table. Lines of the table are delimited with new lines, and columns are separated with commas (,
) or tabulations. Those files are easily parsed by programming languages (including Python). QuPath can export annotations and detections measurements in TSV format.
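For instance, a TSV file exported from QuPath can be read in Python with pandas, as done in the example notebooks (the file name here is hypothetical) :

```python
import pandas as pd

# tab-separated values, hence sep="\t"
df = pd.read_csv("annotations.tsv", sep="\t")
print(df.columns)
```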
JSON is a "data-interchange format". It is used to store data, very much like TOML, but supports more complex data structures and is more efficient to read and write, at the cost of being less human-readable. It is used in cuisto
to store fibers-like objects coordinates, as they can contain several millions of points (making CSV impractical).
GeoJson is a file format used to store geographic data structures, basically objects coordinates with various shapes. It is based on and compatible with JSON, which makes it easy to parse in numerous programming languages. It is used in QuPath to import and export objects, which can be points, lines, polygons...
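Since GeoJSON is plain JSON, it can be inspected with Python's standard library alone. A minimal sketch (the file name is hypothetical; the FeatureCollection layout is the standard GeoJSON structure) :

```python
import json

with open("detections.geojson") as fid:
    collection = json.load(fid)

# each object is a "feature" with a geometry (point, line, polygon...)
for feature in collection["features"]:
    geometry = feature["geometry"]
    if geometry["type"] == "Point":
        x, y = geometry["coordinates"]  # coordinates in the image
```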
"},{"location":"tips-qupath.html","title":"QuPath","text":""},{"location":"tips-qupath.html#custom-scripts","title":"Custom scripts","text":"While QuPath graphical user interface (GUI) should meet a lot of your needs, it is very convenient to use scripting to automate certain tasks, execute them in batch (on all your images) and do things you couldn't do otherwise. QuPath uses the Groovy programming language, which is mostly Java.
Warning
Not all commands will appear in the history.
In QuPath, in the left panel in the "Workflow" tab, there is a history of most of the commands you used during the session. At the bottom, you can click on Create workflow
to select the relevant commands and create a script. This will open the built-in script editor that will contain the groovy version of what you did graphically.
Tip
The scripts/qupath-utils
folder contains a bunch of utility scripts.
They can be run in batch with the three-dotted menu on the bottom right corner of the script editor : Run for project
, then choose the images you want the script to run on.
This notebook shows how to load data exported from QuPath, compute metrics and display them, according to the configuration file. This is meant for a single animal.
There are some conventions that need to be met in the QuPath project so that the measurements are usable with cuisto
:
You should copy this notebook, the configuration file and the atlas-related configuration files (blacklist and fusion) elsewhere and edit them according to your need.
The data was generated from QuPath with stardist cell detection on toy data.
In\u00a0[1]: Copied!import pandas as pd\n\nimport cuisto\nimport pandas as pd import cuisto In\u00a0[2]: Copied!
```python
# Full path to your configuration file, edited according to your need beforehand
config_file = "../../resources/demo_config_cells.toml"
```
```python
# - Files
# animal identifier
animal = "animalid0"
# set the full path to the annotations tsv file from QuPath
annotations_file = "../../resources/cells_measurements_annotations.tsv"
# set the full path to the detections tsv file from QuPath
detections_file = "../../resources/cells_measurements_detections.tsv"
```
```python
# get configuration
cfg = cuisto.config.Config(config_file)
```
```python
# read data
df_annotations = pd.read_csv(annotations_file, index_col="Object ID", sep="\t")
df_detections = pd.read_csv(detections_file, index_col="Object ID", sep="\t")

# remove annotations that are not brain regions
df_annotations = df_annotations[df_annotations["Classification"] != "Region*"]
df_annotations = df_annotations[df_annotations["ROI"] != "Rectangle"]

# convert atlas coordinates from mm to microns
df_detections[["Atlas_X", "Atlas_Y", "Atlas_Z"]] = df_detections[
    ["Atlas_X", "Atlas_Y", "Atlas_Z"]
].multiply(1000)

# have a look
display(df_annotations.head())
display(df_detections.head())
```
(output : table previews of df_annotations and df_detections)
```python
# get distributions per regions, spatial distributions and coordinates
df_regions, dfs_distributions, df_coordinates = cuisto.process.process_animal(
    animal, df_annotations, df_detections, cfg, compute_distributions=True
)

# have a look
display(df_regions.head())
display(df_coordinates.head())
```
(output : table previews of df_regions and df_coordinates)
```python
# plot distributions per regions
figs_regions = cuisto.display.plot_regions(df_regions, cfg)
# specify which regions to plot
# figs_regions = cuisto.display.plot_regions(df_regions, cfg, names_list=["GRN", "IRN", "MDRNv"])

# save as svg
# figs_regions[0].savefig(r"C:\Users\glegoc\Downloads\regions_count.svg")
# figs_regions[1].savefig(r"C:\Users\glegoc\Downloads\regions_density.svg")
```
```python
# plot 1D distributions
fig_distrib = cuisto.display.plot_1D_distributions(
    dfs_distributions, cfg, df_coordinates=df_coordinates
)
```
If there were several animals in the measurement file, they would be displayed as mean +/- sem instead.
```python
# plot heatmap (all types of cells pooled)
fig_heatmap = cuisto.display.plot_2D_distributions(df_coordinates, cfg)
```
"},{"location":"demo_notebooks/density_map.html","title":"Density map","text":"
Draw 2D heatmaps as density isolines.
This notebook does not actually use cuisto and relies only on brainglobe-heatmap to extract brain structures outlines.
Only the detections measurements with atlas coordinates exported from QuPath are used.
You need to select the range of data to be used; the regions outlines will be extracted at the centroid of that range. Therefore, a range that is too large will be misleading and irrelevant.
In\u00a0[1]: Copied!import brainglobe_heatmap as bgh\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport brainglobe_heatmap as bgh import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns In\u00a0[2]: Copied!
```python
# path to the exported measurements from QuPath
filename = "../../resources/cells_measurements_detections.tsv"
```
Settings
In\u00a0[3]: Copied!# atlas to use\natlas_name = \"allen_mouse_10um\"\n# brain regions whose outlines will be plotted\nregions = [\"root\", \"CB\", \"MY\", \"GRN\", \"IRN\"]\n# range to include, in Allen coordinates, in microns\nap_lims = [9800, 10000] # lims : [0, 13200] for coronal\nml_lims = [5600, 5800] # lims : [0, 11400] for sagittal\ndv_lims = [3900, 4100] # lims : [0, 8000] for top\n# number of isolines\nnlevels = 5\n# color mapping between classification and matplotlib color\npalette = {\"Cells: marker-\": \"#d8782f\", \"Cells: marker+\": \"#8ccb73\"}\n# atlas to use atlas_name = \"allen_mouse_10um\" # brain regions whose outlines will be plotted regions = [\"root\", \"CB\", \"MY\", \"GRN\", \"IRN\"] # range to include, in Allen coordinates, in microns ap_lims = [9800, 10000] # lims : [0, 13200] for coronal ml_lims = [5600, 5800] # lims : [0, 11400] for sagittal dv_lims = [3900, 4100] # lims : [0, 8000] for top # number of isolines nlevels = 5 # color mapping between classification and matplotlib color palette = {\"Cells: marker-\": \"#d8782f\", \"Cells: marker+\": \"#8ccb73\"} In\u00a0[4]: Copied!
```python
df = pd.read_csv(filename, sep="\t")
display(df.head())
```
(output : table preview of the detections measurements)
Here we can filter out classifications we don't want to display.
In\u00a0[5]: Copied!# select objects\n# df = df[df[\"Classification\"] == \"example: classification\"]\n# select objects # df = df[df[\"Classification\"] == \"example: classification\"] In\u00a0[6]: Copied!
```python
# get outline coordinates in coronal (=frontal) orientation
coords_coronal = bgh.get_structures_slice_coords(
    regions,
    orientation="frontal",
    atlas_name=atlas_name,
    position=(np.mean(ap_lims), 0, 0),
)
# get outline coordinates in sagittal orientation
coords_sagittal = bgh.get_structures_slice_coords(
    regions,
    orientation="sagittal",
    atlas_name=atlas_name,
    position=(0, 0, np.mean(ml_lims)),
)
# get outline coordinates in top (=horizontal) orientation
coords_top = bgh.get_structures_slice_coords(
    regions,
    orientation="horizontal",
    atlas_name=atlas_name,
    position=(0, np.mean(dv_lims), 0),
)
```
```python
# Coronal projection
# select objects within the rostro-caudal range
df_coronal = df[
    (df["Atlas_X"] >= ap_lims[0] / 1000) & (df["Atlas_X"] <= ap_lims[1] / 1000)
]

plt.figure()

for struct_name, contours in coords_coronal.items():
    for cont in contours:
        plt.fill(cont[:, 0] / 1000, cont[:, 1] / 1000, lw=1, fc="none", ec="k")

# see https://seaborn.pydata.org/generated/seaborn.kdeplot.html to customize
ax = sns.kdeplot(
    df_coronal,
    x="Atlas_Z",
    y="Atlas_Y",
    hue="Classification",
    levels=nlevels,
    common_norm=False,
    palette=palette,
)
ax.invert_yaxis()
sns.despine(left=True, bottom=True)
plt.axis("equal")
plt.xlabel(None)
plt.ylabel(None)
plt.xticks([])
plt.yticks([])
plt.plot([2, 3], [8, 8], "k", linewidth=3)
plt.text(2, 7.9, "1 mm")
```
```python
# Sagittal projection
# select objects within the medio-lateral range
df_sagittal = df[
    (df["Atlas_Z"] >= ml_lims[0] / 1000) & (df["Atlas_Z"] <= ml_lims[1] / 1000)
]

plt.figure()

for struct_name, contours in coords_sagittal.items():
    for cont in contours:
        plt.fill(cont[:, 0] / 1000, cont[:, 1] / 1000, lw=1, fc="none", ec="k")

# see https://seaborn.pydata.org/generated/seaborn.kdeplot.html to customize
ax = sns.kdeplot(
    df_sagittal,
    x="Atlas_X",
    y="Atlas_Y",
    hue="Classification",
    levels=nlevels,
    common_norm=False,
    palette=palette,
)
ax.invert_yaxis()
sns.despine(left=True, bottom=True)
plt.axis("equal")
plt.xlabel(None)
plt.ylabel(None)
plt.xticks([])
plt.yticks([])
plt.plot([2, 3], [7.1, 7.1], "k", linewidth=3)
plt.text(2, 7, "1 mm")
```
```python
# Top projection
# select objects within the dorso-ventral range
df_top = df[(df["Atlas_Y"] >= dv_lims[0] / 1000) & (df["Atlas_Y"] <= dv_lims[1] / 1000)]

plt.figure()

for struct_name, contours in coords_top.items():
    for cont in contours:
        plt.fill(-cont[:, 0] / 1000, cont[:, 1] / 1000, lw=1, fc="none", ec="k")

# see https://seaborn.pydata.org/generated/seaborn.kdeplot.html to customize
ax = sns.kdeplot(
    df_top,
    x="Atlas_Z",
    y="Atlas_X",
    hue="Classification",
    levels=nlevels,
    common_norm=False,
    palette=palette,
)
ax.invert_yaxis()
sns.despine(left=True, bottom=True)
plt.axis("equal")
plt.xlabel(None)
plt.ylabel(None)
plt.xticks([])
plt.yticks([])
plt.plot([0.5, 1.5], [0.5, 0.5], "k", linewidth=3)
plt.text(0.5, 0.4, "1 mm")
```
"},{"location":"demo_notebooks/fibers_coverage.html","title":"Fibers coverage","text":"
Plot regions coverage percentage in the spinal cord.
This showcases that any brainglobe atlas should be supported.
Here we're going to quantify the percentage of each spinal cord region's area innervated by axons.
The "area µm^2" measurement for each annotation can be created in QuPath with a pixel classifier, using the Measure button.
We're going to consider that the "area µm^2" measurement generated by the pixel classifier is an object count. cuisto computes a density, which is the count in each region divided by its area. Therefore, in this case, it will actually be the fraction of area covered by fibers in a given color.
The data was generated using QuPath with a pixel classifier on toy data.
In\u00a0[1]: Copied!import pandas as pd\n\nimport cuisto\nimport pandas as pd import cuisto In\u00a0[2]: Copied!
```python
# Full path to your configuration file, edited according to your need beforehand
config_file = "../../resources/demo_config_fibers.toml"
```
```python
# - Files
# not important if only one animal
animal = "animalid1-SC"
# set the full path to the annotations tsv file from QuPath
annotations_file = "../../resources/fibers_measurements_annotations.tsv"
```
```python
# get configuration
cfg = cuisto.config.Config(config_file)
```
```python
# read data
df_annotations = pd.read_csv(annotations_file, index_col="Object ID", sep="\t")
df_detections = pd.DataFrame()  # empty DataFrame

# remove annotations that are not brain regions
df_annotations = df_annotations[df_annotations["Classification"] != "Region*"]
df_annotations = df_annotations[df_annotations["ROI"] != "Rectangle"]

# have a look
display(df_annotations.head())
```
(output : table preview of df_annotations)
```python
# get distributions per regions, spatial distributions and coordinates
df_regions, dfs_distributions, df_coordinates = cuisto.process.process_animal(
    animal, df_annotations, df_detections, cfg, compute_distributions=False
)

# convert the "density µm^-2" column, which is actually the coverage fraction, to a percentage
df_regions["density µm^-2"] = df_regions["density µm^-2"] * 100

# have a look
display(df_regions.head())
```
(output : table preview of df_regions)
```python
# plot distributions per regions
fig_regions = cuisto.display.plot_regions(df_regions, cfg)
# specify which regions to plot
# fig_regions = cuisto.display.plot_regions(df_regions, cfg, names_list=["Rh9", "Sr9", "8Sp"])

# save as svg
# fig_regions[0].savefig(r"C:\Users\glegoc\Downloads\nice_figure.svg")
```
"},{"location":"demo_notebooks/fibers_length_multi.html","title":"Fibers length in multi animals","text":"
```python
import cuisto
```
```python
# Full path to your configuration file, edited according to your need beforehand
config_file = "../../resources/demo_config_multi.toml"
```
```python
# Files
wdir = "../../resources/multi"
animals = ["mouse0", "mouse1"]
```
```python
# get configuration
cfg = cuisto.Config(config_file)
```
```python
# get distributions per regions
df_regions, _, _ = cuisto.process.process_animals(
    wdir, animals, cfg, compute_distributions=False
)

# have a look
display(df_regions.head(10))
```
(output : per-animal progress bar and table preview of df_regions)
```python
figs_regions = cuisto.display.plot_regions(df_regions, cfg)
```
"},{"location":"demo_notebooks/fibers_length_multi.html#fibers-length-in-multi-animals","title":"Fibers length in multi animals¶","text":"
This example uses synthetic data to showcase how cuisto can be used in a pipeline.
Annotations measurements should be exported from QuPath, following the required directory structure.
Alternatively, you can merge all your CSV files yourself, one per animal, adding an animal ID to each table. Those can be processed with the cuisto.process.process_animal() function in a loop, collecting the results at each iteration and finally concatenating them; the result can then be used with the display module, as sketched below. See the API reference for the process module.
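A minimal sketch of such a loop, assuming hypothetical per-animal file names (the configuration object and the reading/filtering steps are the same as in the single-animal example) :

```python
import pandas as pd

import cuisto

cfg = cuisto.Config("demo_config_multi.toml")  # hypothetical configuration file
animals = ["mouse0", "mouse1"]

res = []
for animal in animals:
    # hypothetical layout : one annotations file per animal
    df_annotations = pd.read_csv(
        f"{animal}_annotations.tsv", index_col="Object ID", sep="\t"
    )
    df_detections = pd.DataFrame()  # empty, as we skip spatial distributions
    df_regions, _, _ = cuisto.process.process_animal(
        animal, df_annotations, df_detections, cfg, compute_distributions=False
    )
    res.append(df_regions)

# concatenate the per-animal results ; plot_regions then shows mean +/- sem
df_regions_all = pd.concat(res, ignore_index=True)
figs_regions = cuisto.display.plot_regions(df_regions_all, cfg)
```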
Info
The documentation is under construction.
cuisto
is a Python package aiming at quantifying histological data.
After ABBA registration of 2D histological slices and QuPath objects' detection, cuisto
is used to :
This documentation contains cuisto
installation instructions, ABBA installation instructions, guides to prepare images for the pipeline, detect objects with QuPath, register 2D slices on a 3D atlas with ABBA, along with examples.
In theory, cuisto
should work with any measurements table with the required columns, but has been designed with ABBA and QuPath in mind.
Due to the IT environment of the laboratory, this documentation is very Windows-oriented but most of it should be applicable to Linux and MacOS as well by slightly adapting terminal commands.
"},{"location":"index.html#documentation-navigation","title":"Documentation navigation","text":"The documentation outline is on the left panel, you can click on items to browse it. In each page, you'll get the table of contents on the right panel.
"},{"location":"index.html#useful-external-resources","title":"Useful external resources","text":"cuisto
has been primarily developed by Guillaume Le Goc in Julien Bouvier's lab at NeuroPSI. The clever name was found by Aurélie Bodeau.
The documentation itself is built with MkDocs using the Material theme.
"},{"location":"api-compute.html","title":"cuisto.compute","text":"compute module, part of cuisto.
Contains actual computation functions.
"},{"location":"api-compute.html#cuisto.compute.get_distribution","title":"get_distribution(df, col, hue, hue_filter, per_commonnorm, binlim, nbins=100)
","text":"Computes distribution of objects.
A global distribution using only col
is computed, then it computes a distribution distinguishing values in the hue
column. For the latter, it is possible to use only a subset of the data, based on another column using hue_filter
. This other column is determined by hue
 : if the latter is "hemisphere", then hue_filter
 is used in the "channel" column and vice-versa. per_commonnorm
 controls how they are normalized, either as a whole (True) or independently (False).
Use cases : (1) single-channel, two hemispheres : col=x
, hue=hemisphere
, hue_filter=\"\"
, per_commonnorm=True
. Computes a distribution for each hemisphere, the sum of the area of both is equal to 1. (2) three-channels, one hemisphere : col=x
, hue=channel
, hue_filter=\"Ipsi.\", per_commonnorm=False
. Computes a distribution for each channel only for points in the ipsilateral hemisphere. Each curve will have an area of 1.
Parameters:
Name Type Description Defaultdf
DataFrame
required col
str
Key in df
, used to compute the distributions.
hue
str
Key in df
. Criterion for additional distributions.
hue_filter
str
Further filtering for \"per\" distribution. - hue = channel : value is the name of one of the hemisphere - hue = hemisphere : value can be the name of a channel, a list of such or \"all\"
requiredper_commonnorm
bool
Use common normalization for all hues (per argument).
requiredbinlim
list or tuple
First bin left edge and last bin right edge.
requirednbins
int
Number of bins. Default is 100.
100
Returns:
Name Type Descriptiondf_distribution
DataFrame
DataFrame with bins
, distribution
, count
and their per-hemisphere or per-channel variants.
cuisto/compute.py
def get_distribution(\n df: pd.DataFrame,\n col: str,\n hue: str,\n hue_filter: dict,\n per_commonnorm: bool,\n binlim: tuple | list,\n nbins=100,\n) -> pd.DataFrame:\n \"\"\"\n Computes distribution of objects.\n\n A global distribution using only `col` is computed, then it computes a distribution\n distinguishing values in the `hue` column. For the latter, it is possible to use a\n subset of the data ony, based on another column using `hue_filter`. This another\n column is determined with `hue`, if the latter is \"hemisphere\", then `hue_filter` is\n used in the \"channel\" color and vice-versa.\n `per_commonnorm` controls how they are normalized, either as a whole (True) or\n independantly (False).\n\n Use cases :\n (1) single-channel, two hemispheres : `col=x`, `hue=hemisphere`, `hue_filter=\"\"`,\n `per_commonorm=True`. Computes a distribution for each hemisphere, the sum of the\n area of both is equal to 1.\n (2) three-channels, one hemisphere : `col=x`, hue=`channel`,\n `hue_filter=\"Ipsi.\", per_commonnorm=False`. Computes a distribution for each channel\n only for points in the ipsilateral hemisphere. Each curve will have an area of 1.\n\n Parameters\n ----------\n df : pandas.DataFrame\n col : str\n Key in `df`, used to compute the distributions.\n hue : str\n Key in `df`. Criterion for additional distributions.\n hue_filter : str\n Further filtering for \"per\" distribution.\n - hue = channel : value is the name of one of the hemisphere\n - hue = hemisphere : value can be the name of a channel, a list of such or \"all\"\n per_commonnorm : bool\n Use common normalization for all hues (per argument).\n binlim : list or tuple\n First bin left edge and last bin right edge.\n nbins : int, optional\n Number of bins. Default is 100.\n\n Returns\n -------\n df_distribution : pandas.DataFrame\n DataFrame with `bins`, `distribution`, `count` and their per-hemisphere or\n per-channel variants.\n\n \"\"\"\n\n # - Preparation\n bin_edges = np.linspace(*binlim, nbins + 1) # create bins\n df_distribution = [] # prepare list of distributions\n\n # - Both hemispheres, all channels\n # get raw count per bins (histogram)\n count, bin_edges = np.histogram(df[col], bin_edges)\n # get normalized count (pdf)\n distribution, _ = np.histogram(df[col], bin_edges, density=True)\n # get bin centers rather than edges to plot them\n bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2\n\n # make a DataFrame out of that\n df_distribution.append(\n pd.DataFrame(\n {\n \"bins\": bin_centers,\n \"distribution\": distribution,\n \"count\": count,\n \"hemisphere\": \"both\",\n \"channel\": \"all\",\n \"axis\": col, # keep track of what col. 
was used\n }\n )\n )\n\n # - Per additional criterion\n # select data\n df_sub = select_hemisphere_channel(df, hue, hue_filter, False)\n hue_values = df[hue].unique() # get grouping values\n # total number of datapoints in the subset used for additional distribution\n length_total = len(df_sub)\n\n for value in hue_values:\n # select part and coordinates\n df_part = df_sub.loc[df_sub[hue] == value, col]\n\n # get raw count per bins (histogram)\n count, bin_edges = np.histogram(df_part, bin_edges)\n # get normalized count (pdf)\n distribution, _ = np.histogram(df_part, bin_edges, density=True)\n\n if per_commonnorm:\n # re-normalize so that the sum of areas of all sub-parts is 1\n length_part = len(df_part) # number of datapoints in that hemisphere\n distribution *= length_part / length_total\n\n # get bin centers rather than edges to plot them\n bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2\n\n # make a DataFrame out of that\n df_distribution.append(\n pd.DataFrame(\n {\n \"bins\": bin_centers,\n \"distribution\": distribution,\n \"count\": count,\n hue: value,\n \"channel\" if hue == \"hemisphere\" else \"hemisphere\": hue_filter,\n \"axis\": col, # keep track of what col. was used\n }\n )\n )\n\n return pd.concat(df_distribution)\n
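As a usage sketch (assuming a df_coordinates DataFrame such as the one returned by cuisto.process.process_animal, and hypothetical bin settings), use case (1) above would read :

```python
import cuisto

# one distribution per hemisphere along the antero-posterior axis, both
# curves normalized together so their areas sum to 1 (use case (1))
df_distribution = cuisto.compute.get_distribution(
    df_coordinates, "Atlas_AP", "hemisphere", "", True, [-8, 0], nbins=75
)
```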
"},{"location":"api-compute.html#cuisto.compute.get_regions_metrics","title":"get_regions_metrics(df_annotations, object_type, channel_names, meas_base_name, metrics_names)
","text":"Get a new DataFrame with cumulated axons segments length in each brain regions.
This is the quantification per brain region for fibers-like objects, eg. axons. The returned DataFrame has columns "cum. length µm", "cum. length mm", "density µm^-1", "density mm^-1", "coverage index".
Parameters:
Name Type Description Defaultdf_annotations
DataFrame
DataFrame with an entry for each brain region, with columns "Area µm^2", "Name", "hemisphere", and "{object_type: channel} Length µm".
requiredobject_type
str
Object type (primary classification).
requiredchannel_names
dict
Map between original channel names to something else.
requiredmeas_base_name
str
required metrics_names
dict
required Returns:
Name Type Descriptiondf_regions
DataFrame
DataFrame with brain regions name, area and metrics.
Source code incuisto/compute.py
def get_regions_metrics(\n df_annotations: pd.DataFrame,\n object_type: str,\n channel_names: dict,\n meas_base_name: str,\n metrics_names: dict,\n) -> pd.DataFrame:\n \"\"\"\n Get a new DataFrame with cumulated axons segments length in each brain regions.\n\n This is the quantification per brain regions for fibers-like objects, eg. axons. The\n returned DataFrame has columns \"cum. length \u00b5m\", \"cum. length mm\", \"density \u00b5m^-1\",\n \"density mm^-1\", \"coverage index\".\n\n Parameters\n ----------\n df_annotations : pandas.DataFrame\n DataFrame with an entry for each brain regions, with columns \"Area \u00b5m^2\",\n \"Name\", \"hemisphere\", and \"{object_type: channel} Length \u00b5m\".\n object_type : str\n Object type (primary classification).\n channel_names : dict\n Map between original channel names to something else.\n meas_base_name : str\n metrics_names : dict\n\n Returns\n -------\n df_regions : pandas.DataFrame\n DataFrame with brain regions name, area and metrics.\n\n \"\"\"\n # get columns names\n cols = df_annotations.columns\n # get columns with fibers lengths\n cols_colors = cols[\n cols.str.startswith(object_type) & cols.str.endswith(meas_base_name)\n ]\n # select relevant data\n cols_to_select = pd.Index([\"Name\", \"hemisphere\", \"Area \u00b5m^2\"]).append(cols_colors)\n # sum lengths and areas of each brain regions\n df_regions = (\n df_annotations[cols_to_select]\n .groupby([\"Name\", \"hemisphere\"])\n .sum()\n .reset_index()\n )\n\n # get measurement for both hemispheres (sum)\n df_both = df_annotations[cols_to_select].groupby([\"Name\"]).sum().reset_index()\n df_both[\"hemisphere\"] = \"both\"\n df_regions = (\n pd.concat([df_regions, df_both], ignore_index=True)\n .sort_values(by=\"Name\")\n .reset_index()\n .drop(columns=\"index\")\n )\n\n # rename measurement columns to lower case\n df_regions = df_regions.rename(\n columns={\n k: k.replace(meas_base_name, meas_base_name.lower()) for k in cols_colors\n }\n )\n\n # update names\n meas_base_name = meas_base_name.lower()\n cols = df_regions.columns\n cols_colors = cols[\n cols.str.startswith(object_type) & cols.str.endswith(meas_base_name)\n ]\n\n # convert area in mm^2\n df_regions[\"Area mm^2\"] = df_regions[\"Area \u00b5m^2\"] / 1e6\n\n # prepare metrics\n if \"\u00b5m\" in meas_base_name:\n # fibers : convert to mm\n cols_to_convert = pd.Index([col for col in cols_colors if \"\u00b5m\" in col])\n df_regions[cols_to_convert.str.replace(\"\u00b5m\", \"mm\")] = (\n df_regions[cols_to_convert] / 1000\n )\n metrics = [meas_base_name, meas_base_name.replace(\"\u00b5m\", \"mm\")]\n else:\n # objects : count\n metrics = [meas_base_name]\n\n # density = measurement / area\n metric = metrics_names[\"density \u00b5m^-2\"]\n df_regions[cols_colors.str.replace(meas_base_name, metric)] = df_regions[\n cols_colors\n ].divide(df_regions[\"Area \u00b5m^2\"], axis=0)\n metrics.append(metric)\n metric = metrics_names[\"density mm^-2\"]\n df_regions[cols_colors.str.replace(meas_base_name, metric)] = df_regions[\n cols_colors\n ].divide(df_regions[\"Area mm^2\"], axis=0)\n metrics.append(metric)\n\n # coverage index = measurement\u00b2 / area\n metric = metrics_names[\"coverage index\"]\n df_regions[cols_colors.str.replace(meas_base_name, metric)] = (\n df_regions[cols_colors].pow(2).divide(df_regions[\"Area \u00b5m^2\"], axis=0)\n )\n metrics.append(metric)\n\n # prepare relative metrics columns\n metric = metrics_names[\"relative measurement\"]\n cols_rel_meas = cols_colors.str.replace(meas_base_name, metric)\n 
df_regions[cols_rel_meas] = np.nan\n metrics.append(metric)\n metric = metrics_names[\"relative density\"]\n cols_dens = cols_colors.str.replace(meas_base_name, metrics_names[\"density mm^-2\"])\n cols_rel_dens = cols_colors.str.replace(meas_base_name, metric)\n df_regions[cols_rel_dens] = np.nan\n metrics.append(metric)\n # relative metrics should be defined within each hemispheres (left, right, both)\n for hemisphere in df_regions[\"hemisphere\"].unique():\n row_indexer = df_regions[\"hemisphere\"] == hemisphere\n\n # relative measurement = measurement / total measurement\n df_regions.loc[row_indexer, cols_rel_meas] = (\n df_regions.loc[row_indexer, cols_colors]\n .divide(df_regions.loc[row_indexer, cols_colors].sum())\n .to_numpy()\n )\n\n # relative density = density / total density\n df_regions.loc[row_indexer, cols_rel_dens] = (\n df_regions.loc[\n row_indexer,\n cols_dens,\n ]\n .divide(df_regions.loc[row_indexer, cols_dens].sum())\n .to_numpy()\n )\n\n # collect channel names\n channels = (\n cols_colors.str.replace(object_type + \": \", \"\")\n .str.replace(\" \" + meas_base_name, \"\")\n .values.tolist()\n )\n # collect measurements columns names\n cols_metrics = df_regions.columns.difference(\n pd.Index([\"Name\", \"hemisphere\", \"Area \u00b5m^2\", \"Area mm^2\"])\n )\n for metric in metrics:\n cols_to_cat = [f\"{object_type}: {cn} {metric}\" for cn in channels]\n # make sure it's part of available metrics\n if not set(cols_to_cat) <= set(cols_metrics):\n raise ValueError(f\"{cols_to_cat} not in DataFrame.\")\n # group all colors in the same colors\n df_regions[metric] = df_regions[cols_to_cat].values.tolist()\n # remove original data\n df_regions = df_regions.drop(columns=cols_to_cat)\n\n # add a color tag, given their names in the configuration file\n df_regions[\"channel\"] = len(df_regions) * [[channel_names[k] for k in channels]]\n metrics.append(\"channel\")\n\n # explode the dataframe so that each color has an entry\n df_regions = df_regions.explode(metrics)\n\n return df_regions\n
"},{"location":"api-compute.html#cuisto.compute.normalize_starter_cells","title":"normalize_starter_cells(df, cols, animal, info_file, channel_names)
","text":"Normalize data by the number of starter cells.
Parameters:
Name Type Description Defaultdf
DataFrame
Contains the data to be normalized.
requiredcols
list-like
Columns to divide by the number of starter cells.
requiredanimal
str
Animal ID to parse the number of starter cells.
requiredinfo_file
str
Full path to the TOML file with information.
requiredchannel_names
dict
Map between original channel names to something else.
requiredReturns:
Type DescriptionDataFrame
Same df
with normalized count.
cuisto/compute.py
def normalize_starter_cells(\n df: pd.DataFrame, cols: list[str], animal: str, info_file: str, channel_names: dict\n) -> pd.DataFrame:\n \"\"\"\n Normalize data by the number of starter cells.\n\n Parameters\n ----------\n df : pd.DataFrame\n Contains the data to be normalized.\n cols : list-like\n Columns to divide by the number of starter cells.\n animal : str\n Animal ID to parse the number of starter cells.\n info_file : str\n Full path to the TOML file with informations.\n channel_names : dict\n Map between original channel names to something else.\n\n Returns\n -------\n pd.DataFrame\n Same `df` with normalized count.\n\n \"\"\"\n for channel in df[\"channel\"].unique():\n # inverse mapping channel colors : names\n reverse_channels = {v: k for k, v in channel_names.items()}\n nstarters = get_starter_cells(animal, reverse_channels[channel], info_file)\n\n for col in cols:\n df.loc[df[\"channel\"] == channel, col] = (\n df.loc[df[\"channel\"] == channel, col] / nstarters\n )\n\n return df\n
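A short usage sketch (hypothetical paths, IDs and column names), assuming df_regions was built as in the example above and carries a channel column:

import cuisto

df_regions = cuisto.compute.normalize_starter_cells(
    df_regions,                            # output of get_regions_metrics()
    cols=["length mm", "density mm^-1"],   # hypothetical metric columns to normalize
    animal="animal0",                      # hypothetical animal ID
    info_file="/path/to/infos.toml",       # hypothetical TOML information file
    channel_names={"EGFP": "Positive fibers"},
)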
"},{"location":"api-config-config.html","title":"Api config config","text":"object_type
: name of QuPath base classification (eg. without the \": subclass\" part) segmentation_tag
: type of segmentation, matches directory name, used only in the full pipeline
Information related to the atlas used
name
: brainglobe-atlasapi atlas name type
: \"brain\" or \"cord\" (eg. registration done in ABBA or abba_python). This will determine whether to flip Left/Right when determining detections hemisphere based on their coordinates. Also adapts the axes in the 2D heatmaps. midline
: midline Z coordinates (left/right limit) in microns to determine detections hemisphere based on their coordinates. outline_structures
: structures to show an outline of in heatmaps
Information related to imaging channels
names
Must contain all classifications derived from \"object_type\" you want to process, in the form subclassification name = name to display on the plots
\"marker+\"
: classification name = name to display \"marker-\"
: add any number of sub-classifications
Must have same keys as \"names\" keys, in the form subclassification name = color
, with color specified as a matplotlib named color, an RGB list or an hex code.
\"marker+\"
: classification name = matplotlib color \"marker-\"
: must have the same entries as \"names\".
Information related to hemispheres, same structure as channels
names
Left
: Left = name to display Right
: Right = name to display
Must have the same keys as \"names\"
Left
: ff516e\" # Left = matplotlib color (either #hex, color name or RGB list) Right
: 960010\" # Right = matplotlib color
Spatial distributions parameters
stereo
: use stereotaxic coordinates (as in Paxinos, only for mouse brain CCFv3) ap_lim
: bins limits for antero-posterior in mm ap_nbins
: number of bins for antero-posterior dv_lim
: bins limits for dorso-ventral in mm dv_nbins
: number of bins for dorso-ventral ml_lim
: bins limits for medio-lateral in mm ml_nbins
: number of bins for medio-lateral hue
: color curves with this parameter, must be \"hemisphere\" or \"channel\" hue_filter
: use only a subset of data
common_norm
: use a global normalization (eg. the sum of areas under all curves is 1). Otherwise, normalize each hue individually
Display parameters
show_injection
: add a patch showing the extent of injection sites. Uses the corresponding channel colors. Requires the information TOML configuration file to be set up cmap
: matplotlib color map for 2D heatmaps cmap_nbins
: number of bins for 2D heatmaps cmap_lim
: color limits for 2D heatmaps
Distributions per regions parameters
base_measurement
: the name of the measurement in QuPath to derive others from. Usually \"Count\" or \"Length \u00b5m\" hue
: color bars with this parameter, must be \"hemisphere\" or \"channel\" hue_filter
: use only a subset of data
hue_mirror
: plot two hue_filter in mirror instead of discarding the others. For example, if hue=channel and hue_filter=\"both\", plots the two hemispheres in mirror. normalize_starter_cells
: normalize non-relative metrics by the number of starter cells
Names of metrics. The keys are used internally in cuisto as-is, so they should NOT be modified. The values only change the metrics' names in the output file
\"density \u00b5m^-2\"
: relevant name \"density mm^-2\"
: relevant name \"coverage index\"
: relevant name \"relative measurement\"
: relevant name \"relative density\"
: relevant name
nregions
: number of regions to display (sorted by max.) orientation
: orientation of the bars (\"h\" or \"v\") order
: order the regions by \"ontology\" or by \"max\". Set to \"max\" to provide a custom order dodge
: enforce that the bars are not stacked log_scale
: use log scale for metrics
Names of the metrics to display
\"count\"
: real_name = display_name, with real_name the \"values\" in [regions.metrics] \"density mm^-2\"
Full path to information TOML files and atlas outlines for 2D heatmaps.
blacklist
fusion
outlines
infos
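Putting these keys together, a minimal configuration could look like the sketch below (all values are illustrative, not a shipped template, and not every key is shown). It is parsed here with Python's built-in tomllib to show the nesting that cuisto.Config exposes as attributes:

import tomllib

cfg_dict = tomllib.loads("""
object_type = "Fibers"         # QuPath base classification
segmentation_tag = "fibers"    # matches the segmentation directory name

[atlas]
name = "allen_mouse_10um"      # brainglobe-atlasapi atlas name (illustrative)
type = "brain"                 # "brain" or "cord"
midline = 5700                 # µm, left/right limit
outline_structures = ["root"]

[channels.names]
"marker+" = "Positive"
"marker-" = "Negative"

[channels.colors]
"marker+" = "#870bbf"
"marker-" = "#a4fa78"

[hemispheres.names]
Left = "Contra."
Right = "Ipsi."

[hemispheres.colors]
Left = "#ff516e"
Right = "#960010"

[distributions]
stereo = true
ap_lim = [-8.0, 0.0]           # mm
ap_nbins = 75
hue = "channel"
hue_filter = "both"
common_norm = true

[regions]
base_measurement = "Length µm"
hue = "channel"
hue_filter = "both"
hue_mirror = false
normalize_starter_cells = false

[files]
blacklist = "/path/to/blacklist.toml"
fusion = "/path/to/fusion.toml"
outlines = "/path/to/outlines.h5"
infos = "/path/to/infos.toml"
""")
print(cfg_dict["regions"]["base_measurement"])  # Length µm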
config module, part of cuisto.
Contains the Config class.
"},{"location":"api-config.html#cuisto.config.Config","title":"Config(config_file)
","text":"The configuration class.
Reads the input configuration file and provides its constants.
Parameters:
Name Type Description Defaultconfig_file
str
Full path to the configuration file to load.
requiredReturns:
Name Type Descriptioncfg
Config object.
Constructor.
Source code incuisto/config.py
def __init__(self, config_file):\n \"\"\"Constructor.\"\"\"\n with open(config_file, \"rb\") as fid:\n cfg = tomllib.load(fid)\n\n for key in cfg:\n setattr(self, key, cfg[key])\n\n self.config_file = config_file\n self.bg_atlas = BrainGlobeAtlas(self.atlas[\"name\"], check_latest=False)\n self.get_blacklist()\n self.get_leaves_list()\n
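A short usage sketch (hypothetical configuration path); attributes mirror the TOML sections:

import cuisto

cfg = cuisto.Config("/path/to/config.toml")  # loads the TOML and the brainglobe atlas
print(cfg.object_type)
palette = cfg.get_hue_palette("distributions")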
"},{"location":"api-config.html#cuisto.config.Config.get_blacklist","title":"get_blacklist()
","text":"Wraps cuisto.utils.get_blacklist.
Source code incuisto/config.py
def get_blacklist(self):\n \"\"\"Wraps cuisto.utils.get_blacklist.\"\"\"\n\n self.atlas[\"blacklist\"] = utils.get_blacklist(\n self.files[\"blacklist\"], self.bg_atlas\n )\n
"},{"location":"api-config.html#cuisto.config.Config.get_hue_palette","title":"get_hue_palette(mode)
","text":"Get color palette given hue.
Maps hue to colors in channels or hemispheres.
Parameters:
Name Type Description Defaultmode
(hemisphere, channel)
\"hemisphere\"
Returns:
Name Type Descriptionpalette
dict
Maps a hue level to a color, usable in seaborn.
Source code incuisto/config.py
def get_hue_palette(self, mode: str) -> dict:\n \"\"\"\n Get color palette given hue.\n\n Maps hue to colors in channels or hemispheres.\n\n Parameters\n ----------\n mode : {\"hemisphere\", \"channel\"}\n\n Returns\n -------\n palette : dict\n Maps a hue level to a color, usable in seaborn.\n\n \"\"\"\n params = getattr(self, mode)\n\n if params[\"hue\"] == \"channel\":\n # replace channels by their new names\n palette = {\n self.channels[\"names\"][k]: v for k, v in self.channels[\"colors\"].items()\n }\n elif params[\"hue\"] == \"hemisphere\":\n # replace hemispheres by their new names\n palette = {\n self.hemispheres[\"names\"][k]: v\n for k, v in self.hemispheres[\"colors\"].items()\n }\n else:\n palette = None\n warnings.warn(f\"hue={self.regions[\"display\"][\"hue\"]} not supported.\")\n\n return palette\n
"},{"location":"api-config.html#cuisto.config.Config.get_injection_sites","title":"get_injection_sites(animals)
","text":"Get list of injection sites coordinates for each animals, for each channels.
Parameters:
Name Type Description Defaultanimals
list of str
List of animals.
requiredReturns:
Name Type Descriptioninjection_sites
dict
{\"x\": {channel0: [x]}, \"y\": {channel1: [y]}}
Source code incuisto/config.py
def get_injection_sites(self, animals: list[str]) -> dict:\n \"\"\"\n Get list of injection sites coordinates for each animals, for each channels.\n\n Parameters\n ----------\n animals : list of str\n List of animals.\n\n Returns\n -------\n injection_sites : dict\n {\"x\": {channel0: [x]}, \"y\": {channel1: [y]}}\n\n \"\"\"\n injection_sites = {\n axis: {channel: [] for channel in self.channels[\"names\"].keys()}\n for axis in [\"x\", \"y\", \"z\"]\n }\n\n for animal in animals:\n for channel in self.channels[\"names\"].keys():\n injx, injy, injz = utils.get_injection_site(\n animal,\n self.files[\"infos\"],\n channel,\n stereo=self.distributions[\"stereo\"],\n )\n if injx is not None:\n injection_sites[\"x\"][channel].append(injx)\n if injy is not None:\n injection_sites[\"y\"][channel].append(injy)\n if injz is not None:\n injection_sites[\"z\"][channel].append(injz)\n\n return injection_sites\n
"},{"location":"api-config.html#cuisto.config.Config.get_leaves_list","title":"get_leaves_list()
","text":"Wraps utils.get_leaves_list.
Source code incuisto/config.py
def get_leaves_list(self):\n \"\"\"Wraps utils.get_leaves_list.\"\"\"\n\n self.atlas[\"leaveslist\"] = utils.get_leaves_list(self.bg_atlas)\n
"},{"location":"api-display.html","title":"cuisto.display","text":"display module, part of cuisto.
Contains display functions, essentially wrapping matplotlib and seaborn functions.
"},{"location":"api-display.html#cuisto.display.add_data_coverage","title":"add_data_coverage(df, ax, colors=None, **kwargs)
","text":"Add lines below the plot to represent data coverage.
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame with X_min
and X_max
on rows, for each animal (on columns).
ax
Axes
Handle to axes where to add the patch.
requiredcolors
list or str or None
Colors for the patches, as an RGB list or hex list. Should be the same size as the number of patches to plot, eg. the number of columns in df
. If None, default seaborn colors are used. If only one element, used for each animal.
None
**kwargs
passed to patches.Rectangle()
{}
Returns:
Name Type Descriptionax
Axes
Handle to updated axes.
Source code incuisto/display.py
def add_data_coverage(\n df: pd.DataFrame, ax: plt.Axes, colors: list | str | None = None, **kwargs\n) -> plt.Axes:\n \"\"\"\n Add lines below the plot to represent data coverage.\n\n Parameters\n ----------\n df : pandas.DataFrame\n DataFrame with `X_min` and `X_max` on rows for each animals (on columns).\n ax : Axes\n Handle to axes where to add the patch.\n colors : list or str or None, optional\n Colors for the patches, as a RGB list or hex list. Should be the same size as\n the number of patches to plot, eg. the number of columns in `df`. If None,\n default seaborn colors are used. If only one element, used for each animal.\n **kwargs : passed to patches.Rectangle()\n\n Returns\n -------\n ax : Axes\n Handle to updated axes.\n\n \"\"\"\n # get colors\n ncolumns = len(df.columns)\n if not colors:\n colors = sns.color_palette(n_colors=ncolumns)\n elif isinstance(colors, str) or (isinstance(colors, list) & (len(colors) == 3)):\n colors = [colors] * ncolumns\n elif len(colors) != ncolumns:\n warnings.warn(f\"Wrong number of colors ({len(colors)}), using default colors.\")\n colors = sns.color_palette(n_colors=ncolumns)\n\n # get patch height depending on current axis limits\n ymin, ymax = ax.get_ylim()\n height = (ymax - ymin) * 0.02\n\n for animal, color in zip(df.columns, colors):\n # get patch coordinates\n ymin, ymax = ax.get_ylim()\n ylength = ymax - ymin\n ybottom = ymin - 0.02 * ylength\n xleft = df.loc[\"X_min\", animal]\n xright = df.loc[\"X_max\", animal]\n\n # plot patch\n ax.add_patch(\n patches.Rectangle(\n (xleft, ybottom),\n xright - xleft,\n height,\n label=animal,\n color=color,\n **kwargs,\n )\n )\n\n ax.autoscale(tight=True) # set new axes limits\n\n ax.autoscale() # reset scale\n\n return ax\n
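A usage sketch (hypothetical animal IDs and extents, in mm):

import matplotlib.pyplot as plt
import pandas as pd
import cuisto

# rows X_min/X_max give the data extent, one column per animal
df_coverage = pd.DataFrame(
    {"animal0": [-7.2, -1.1], "animal1": [-6.8, -0.9]},
    index=["X_min", "X_max"],
)
fig, ax = plt.subplots()
ax.plot([-7.5, -0.5], [0.0, 1.0])  # placeholder for an existing distribution plot
ax = cuisto.display.add_data_coverage(df_coverage, ax, edgecolor=None, alpha=0.5)
ax.legend()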
"},{"location":"api-display.html#cuisto.display.add_injection_patch","title":"add_injection_patch(X, ax, **kwargs)
","text":"Add a patch representing the injection sites.
The patch will span from the minimal coordinate to the maximal. If plotted in stereotaxic coordinates, coordinates should be converted beforehand.
Parameters:
Name Type Description DefaultX
list
Coordinates in mm for each animal. Can be empty to not plot anything.
requiredax
Axes
Handle to axes where to add the patch.
required**kwargs
passed to Axes.axvspan
{}
Returns:
Name Type Descriptionax
Axes
Handle to updated Axes.
Source code incuisto/display.py
def add_injection_patch(X: list, ax: plt.Axes, **kwargs) -> plt.Axes:\n \"\"\"\n Add a patch representing the injection sites.\n\n The patch will span from the minimal coordinate to the maximal.\n If plotted in stereotaxic coordinates, coordinates should be converted beforehand.\n\n Parameters\n ----------\n X : list\n Coordinates in mm for each animals. Can be empty to not plot anything.\n ax : Axes\n Handle to axes where to add the patch.\n **kwargs : passed to Axes.axvspan\n\n Returns\n -------\n ax : Axes\n Handle to updated Axes.\n\n \"\"\"\n # plot patch\n if len(X) > 0:\n ax.axvspan(min(X), max(X), **kwargs)\n\n return ax\n
"},{"location":"api-display.html#cuisto.display.draw_structure_outline","title":"draw_structure_outline(view='sagittal', structures=['root'], outline_file='', ax=None, microns=False, **kwargs)
","text":"Plot brain regions outlines in given projection.
This requires a file containing the structures outlines.
Parameters:
Name Type Description Defaultview
str
Projection, \"sagittal\", \"coronal\" or \"top\". Default is \"sagittal\".
'sagittal'
structures
list[str]
List of structure acronyms whose outlines will be drawn. Default is [\"root\"].
['root']
outline_file
str
Full path to the outlines HDF5 file.
''
ax
Axes or None
Axes where to plot the outlines. If None, get current axes (the default).
None
microns
bool
If False (default), converts the coordinates in mm.
False
**kwargs
passed to pyplot.plot()
{}
Returns:
Name Type Descriptionax
Axes
Source code in cuisto/display.py
def draw_structure_outline(\n view: str = \"sagittal\",\n structures: list[str] = [\"root\"],\n outline_file: str = \"\",\n ax: plt.Axes | None = None,\n microns: bool = False,\n **kwargs,\n) -> plt.Axes:\n \"\"\"\n Plot brain regions outlines in given projection.\n\n This requires a file containing the structures outlines.\n\n Parameters\n ----------\n view : str\n Projection, \"sagittal\", \"coronal\" or \"top\". Default is \"sagittal\".\n structures : list[str]\n List of structures acronyms whose outlines will be drawn. Default is [\"root\"].\n outline_file : str\n Full path the outlines HDF5 file.\n ax : plt.Axes or None, optional\n Axes where to plot the outlines. If None, get current axes (the default).\n microns : bool, optional\n If False (default), converts the coordinates in mm.\n **kwargs : passed to pyplot.plot()\n\n Returns\n -------\n ax : plt.Axes\n\n \"\"\"\n # get axes\n if not ax:\n ax = plt.gca()\n\n # get units\n if microns:\n conv = 1\n else:\n conv = 1 / 1000\n\n with h5py.File(outline_file) as f:\n if view == \"sagittal\":\n for structure in structures:\n dsets = f[\"sagittal\"][structure]\n\n for dset in dsets.values():\n ax.plot(dset[:, 0] * conv, dset[:, 1] * conv, **kwargs)\n\n if view == \"coronal\":\n for structure in structures:\n dsets = f[\"coronal\"][structure]\n\n for dset in dsets.values():\n ax.plot(dset[:, 0] * conv, dset[:, 1] * conv, **kwargs)\n\n if view == \"top\":\n for structure in structures:\n dsets = f[\"top\"][structure]\n\n for dset in dsets.values():\n ax.plot(dset[:, 0] * conv, dset[:, 1] * conv, **kwargs)\n\n return ax\n
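A usage sketch (hypothetical structure acronyms and outlines file):

import matplotlib.pyplot as plt
import cuisto

fig, ax = plt.subplots()
ax = cuisto.display.draw_structure_outline(
    view="coronal",
    structures=["root", "GRN"],           # hypothetical acronyms
    outline_file="/path/to/outlines.h5",  # HDF5 file with the structures outlines
    ax=ax,
    color="k",
    linewidth=1.5,
)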
"},{"location":"api-display.html#cuisto.display.nice_bar_plot","title":"nice_bar_plot(df, x='', y=[''], hue='', ylabel=[''], orient='h', nx=None, ordering=None, names_list=None, hue_mirror=False, log_scale=False, bar_kws={}, pts_kws={})
","text":"Nice bar plot of per-region objects distribution.
This is used for objects distribution across brain regions. Shows the y
metric (count, areal density, cumulated length...) in each x
categories (brain regions). orient
controls whether the bars are shown horizontally (default) or vertically. Input df
must have an additional \"hemisphere\" column. All y
are plotted in the same figure as different subplots. nx
controls the number of displayed regions.
Parameters:
Name Type Description Defaultdf
DataFrame
required x
str
Key in df
.
''
y
str
Key in df
.
''
hue
str
Key in df
.
''
ylabel
list of str
Y axis labels.
['']
orient
h or v
\"h\" for horizontal bars (default) or \"v\" for vertical bars.
'h'
nx
None or int
Number of x
to show in the plot. Default is None (no limit).
None
ordering
None or list[str] or max
Sorted list of acronyms. Data will be sorted following this order; if \"max\", sorted by descending values; if None, not sorted (default).
None
names_list
list or None
List of names to display. If None (default), takes the most prominent overall ones.
None
hue_mirror
bool
If there are 2 groups, plot in mirror. Default is False.
False
log_scale
bool
Set the metrics in log scale. Default is False.
False
bar_kws
dict
Passed to seaborn.barplot().
{}
pts_kws
dict
Passed to seaborn.stripplot().
{}
Returns:
Name Type Descriptionfigs
list
List of figures.
Source code incuisto/display.py
def nice_bar_plot(\n df: pd.DataFrame,\n x: str = \"\",\n y: list[str] = [\"\"],\n hue: str = \"\",\n ylabel: list[str] = [\"\"],\n orient=\"h\",\n nx: None | int = None,\n ordering: None | list[str] | str = None,\n names_list: None | list = None,\n hue_mirror: bool = False,\n log_scale: bool = False,\n bar_kws: dict = {},\n pts_kws: dict = {},\n) -> list[plt.Axes]:\n \"\"\"\n Nice bar plot of per-region objects distribution.\n\n This is used for objects distribution across brain regions. Shows the `y` metric\n (count, aeral density, cumulated length...) in each `x` categories (brain regions).\n `orient` controls wether the bars are shown horizontally (default) or vertically.\n Input `df` must have an additional \"hemisphere\" column. All `y` are plotted in the\n same figure as different subplots. `nx` controls the number of displayed regions.\n\n Parameters\n ----------\n df : pandas.DataFrame\n x, y, hue : str\n Key in `df`.\n ylabel : list of str\n Y axis labels.\n orient : \"h\" or \"v\", optional\n \"h\" for horizontal bars (default) or \"v\" for vertical bars.\n nx : None or int, optional\n Number of `x` to show in the plot. Default is None (no limit).\n ordering : None or list[str] or \"max\", optional\n Sorted list of acronyms. Data will be sorted follwowing this order, if \"max\",\n sorted by descending values, if None, not sorted (default).\n names_list : list or None, optional\n List of names to display. If None (default), takes the most prominent overall\n ones.\n hue_mirror : bool, optional\n If there are 2 groups, plot in mirror. Default is False.\n log_scale : bool, optional\n Set the metrics in log scale. Default is False.\n bar_kws : dict\n Passed to seaborn.barplot().\n pts_kws : dict\n Passed to seaborn.stripplot().\n\n Returns\n -------\n figs : list\n List of figures.\n\n \"\"\"\n figs = []\n # loop for each features\n for yi, ylabeli in zip(y, ylabel):\n # prepare data\n # get nx first most prominent regions\n if not names_list:\n names_list_plt = (\n df.groupby([\"Name\"])[yi].mean().sort_values(ascending=False).index[0:nx]\n )\n else:\n names_list_plt = names_list\n dfplt = df[df[\"Name\"].isin(names_list_plt)] # limit to those regions\n # limit hierarchy list if provided\n if isinstance(ordering, list):\n order = [el for el in ordering if el in names_list_plt]\n elif ordering == \"max\":\n order = names_list_plt\n else:\n order = None\n\n # reorder keys depending on orientation and create axes\n if orient == \"h\":\n xp = yi\n yp = x\n if hue_mirror:\n nrows = 1\n ncols = 2\n sharex = None\n sharey = \"all\"\n else:\n nrows = 1\n ncols = 1\n sharex = None\n sharey = None\n elif orient == \"v\":\n xp = x\n yp = yi\n if hue_mirror:\n nrows = 2\n ncols = 1\n sharex = \"all\"\n sharey = None\n else:\n nrows = 1\n ncols = 1\n sharex = None\n sharey = None\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey)\n\n if hue_mirror:\n # two graphs\n ax1, ax2 = axs\n # determine what will be mirrored\n if hue == \"channel\":\n hue_filter = \"hemisphere\"\n elif hue == \"hemisphere\":\n hue_filter = \"channel\"\n # select the two types (should be left/right or two channels)\n hue_filters = dfplt[hue_filter].unique()[0:2]\n hue_filters.sort() # make sure it will be always in the same order\n\n # plot\n for filt, ax in zip(hue_filters, [ax1, ax2]):\n dfplt2 = dfplt[dfplt[hue_filter] == filt]\n ax = sns.barplot(\n dfplt2,\n x=xp,\n y=yp,\n hue=hue,\n estimator=\"mean\",\n errorbar=\"se\",\n orient=orient,\n order=order,\n ax=ax,\n **bar_kws,\n )\n # add 
points\n ax = sns.stripplot(\n dfplt2, x=xp, y=yp, hue=hue, legend=False, ax=ax, **pts_kws\n )\n\n # cosmetics\n if orient == \"h\":\n ax.set_title(f\"{hue_filter}: {filt}\")\n ax.set_ylabel(None)\n ax.set_ylim((nx + 0.5, -0.5))\n if log_scale:\n ax.set_xscale(\"log\")\n\n elif orient == \"v\":\n if ax == ax1:\n # top title\n ax1.set_title(f\"{hue_filter}: {filt}\")\n ax.set_xlabel(None)\n elif ax == ax2:\n # use xlabel as bottom title\n ax2.set_xlabel(\n f\"{hue_filter}: {filt}\", fontsize=ax1.title.get_fontsize()\n )\n ax.set_xlim((-0.5, nx + 0.5))\n if log_scale:\n ax.set_yscale(\"log\")\n\n for label in ax.get_xticklabels():\n label.set_verticalalignment(\"center\")\n label.set_horizontalalignment(\"center\")\n\n # tune axes cosmetics\n if orient == \"h\":\n ax1.set_xlabel(ylabeli)\n ax2.set_xlabel(ylabeli)\n ax1.set_xlim(\n ax1.get_xlim()[0], max((ax1.get_xlim()[1], ax2.get_xlim()[1]))\n )\n ax2.set_xlim(\n ax2.get_xlim()[0], max((ax1.get_xlim()[1], ax2.get_xlim()[1]))\n )\n ax1.invert_xaxis()\n sns.despine(ax=ax1, left=True, top=True, right=False, bottom=False)\n sns.despine(ax=ax2, left=False, top=True, right=True, bottom=False)\n ax1.yaxis.tick_right()\n ax1.tick_params(axis=\"y\", pad=20)\n for label in ax1.get_yticklabels():\n label.set_verticalalignment(\"center\")\n label.set_horizontalalignment(\"center\")\n elif orient == \"v\":\n ax2.set_ylabel(ylabeli)\n ax1.set_ylim(\n ax1.get_ylim()[0], max((ax1.get_ylim()[1], ax2.get_ylim()[1]))\n )\n ax2.set_ylim(\n ax2.get_ylim()[0], max((ax1.get_ylim()[1], ax2.get_ylim()[1]))\n )\n ax2.invert_yaxis()\n sns.despine(ax=ax1, left=False, top=True, right=True, bottom=False)\n sns.despine(ax=ax2, left=False, top=False, right=True, bottom=True)\n for label in ax2.get_xticklabels():\n label.set_verticalalignment(\"center\")\n label.set_horizontalalignment(\"center\")\n ax2.tick_params(axis=\"x\", labelrotation=90, pad=20)\n\n else:\n # one graph\n ax = axs\n # plot\n ax = sns.barplot(\n dfplt,\n x=xp,\n y=yp,\n hue=hue,\n estimator=\"mean\",\n errorbar=\"se\",\n orient=orient,\n order=order,\n ax=ax,\n **bar_kws,\n )\n # add points\n ax = sns.stripplot(\n dfplt, x=xp, y=yp, hue=hue, legend=False, ax=ax, **pts_kws\n )\n\n # cosmetics\n if orient == \"h\":\n ax.set_xlabel(ylabeli)\n ax.set_ylabel(None)\n ax.set_ylim((nx + 0.5, -0.5))\n if log_scale:\n ax.set_xscale(\"log\")\n elif orient == \"v\":\n ax.set_xlabel(None)\n ax.set_ylabel(ylabeli)\n ax.set_xlim((-0.5, nx + 0.5))\n if log_scale:\n ax.set_yscale(\"log\")\n\n fig.tight_layout(pad=0)\n figs.append(fig)\n\n return figs\n
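A usage sketch, assuming df_regions was produced by cuisto.compute.get_regions_metrics() (metric column names are hypothetical):

import cuisto

figs = cuisto.display.nice_bar_plot(
    df_regions,
    x="Name",                               # brain region acronyms
    y=["density mm^-1", "coverage index"],  # hypothetical metric columns
    hue="channel",
    ylabel=["density (mm^-1)", "coverage index"],
    orient="h",
    nx=20,                                  # show the 20 most prominent regions
    ordering="max",
)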
"},{"location":"api-display.html#cuisto.display.nice_distribution_plot","title":"nice_distribution_plot(df, x='', y='', hue=None, xlabel='', ylabel='', injections_sites={}, channel_colors={}, channel_names={}, ax=None, **kwargs)
","text":"Nice plot of 1D distribution of objects.
Parameters:
Name Type Description Defaultdf
DataFrame
required x
str
Keys in df
.
''
y
str
Keys in df
.
''
hue
str or None
Key in df
. If None, no hue is used.
None
xlabel
str
X and Y axes labels.
''
ylabel
str
X and Y axes labels.
''
injections_sites
dict
List of injection sites 1D coordinates in a dict with the channel name as key. If empty, injection site is not plotted (default).
{}
channel_colors
dict
Required if injections_sites is not empty, dict mapping channel names to a color.
{}
channel_names
dict
Required if injections_sites is not empty, dict mapping channel names to a display name.
{}
ax
Axes or None
Axes in which to plot the figure, if None, a new figure is created (default).
None
**kwargs
passed to seaborn.lineplot()
{}
Returns:
Name Type Descriptionax
matplotlib axes
Handle to axes.
Source code incuisto/display.py
def nice_distribution_plot(\n df: pd.DataFrame,\n x: str = \"\",\n y: str = \"\",\n hue: str | None = None,\n xlabel: str = \"\",\n ylabel: str = \"\",\n injections_sites: dict = {},\n channel_colors: dict = {},\n channel_names: dict = {},\n ax: plt.Axes | None = None,\n **kwargs,\n) -> plt.Axes:\n \"\"\"\n Nice plot of 1D distribution of objects.\n\n Parameters\n ----------\n df : pandas.DataFrame\n x, y : str\n Keys in `df`.\n hue : str or None, optional\n Key in `df`. If None, no hue is used.\n xlabel, ylabel : str\n X and Y axes labels.\n injections_sites : dict, optional\n List of injection sites 1D coordinates in a dict with the channel name as key.\n If empty, injection site is not plotted (default).\n channel_colors : dict, optional\n Required if injections_sites is not empty, dict mapping channel names to a\n color.\n channel_names : dict, optional\n Required if injections_sites is not empty, dict mapping channel names to a\n display name.\n ax : Axes or None, optional\n Axes in which to plot the figure, if None, a new figure is created (default).\n **kwargs : passed to seaborn.lineplot()\n\n Returns\n -------\n ax : matplotlib axes\n Handle to axes.\n\n \"\"\"\n if not ax:\n # create figure\n _, ax = plt.subplots(figsize=(10, 6))\n\n ax = sns.lineplot(\n df,\n x=x,\n y=y,\n hue=hue,\n estimator=\"mean\",\n errorbar=\"se\",\n ax=ax,\n **kwargs,\n )\n\n for channel in injections_sites.keys():\n ax = add_injection_patch(\n injections_sites[channel],\n ax,\n color=channel_colors[channel],\n edgecolor=None,\n alpha=0.25,\n label=channel_names[channel] + \": inj. site\",\n )\n\n ax.legend()\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n return ax\n
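A usage sketch with a toy distribution (in practice, the DataFrame comes from cuisto.compute.get_distribution()):

import matplotlib.pyplot as plt
import pandas as pd
import cuisto

df_distribution = pd.DataFrame(
    {
        "bins": [-7.0, -6.0, -5.0] * 2,
        "distribution": [0.1, 0.5, 0.4, 0.2, 0.4, 0.4],
        "hemisphere": ["Ipsi."] * 3 + ["Contra."] * 3,
    }
)
fig, ax = plt.subplots(figsize=(10, 6))
ax = cuisto.display.nice_distribution_plot(
    df_distribution,
    x="bins",
    y="distribution",
    hue="hemisphere",
    xlabel="Rostro-caudal position (mm)",
    ylabel="normalized distribution",
    ax=ax,
    linewidth=2,
)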
"},{"location":"api-display.html#cuisto.display.nice_heatmap","title":"nice_heatmap(df, animals, x='', y='', xlabel='', ylabel='', invertx=False, inverty=False, **kwargs)
","text":"Nice plots of 2D distribution of boutons as a heatmap per animal.
Parameters:
Name Type Description Defaultdf
DataFrame
required animals
list-like of str
List of animals.
requiredx
str
Keys in df
.
''
y
str
Keys in df
.
''
xlabel
str
Labels of x and y axes.
''
ylabel
str
Labels of x and y axes.
''
invertx
bool
Whether to invert the x or y axes. Default is False.
False
inverty
bool
Whether to invert the x or y axes. Default is False.
False
**kwargs
passed to seaborn.histplot()
{}
Returns:
Name Type Descriptionax
Axes or list of Axes
Handle to axes.
Source code incuisto/display.py
def nice_heatmap(\n df: pd.DataFrame,\n animals: tuple[str] | list[str],\n x: str = \"\",\n y: str = \"\",\n xlabel: str = \"\",\n ylabel: str = \"\",\n invertx: bool = False,\n inverty: bool = False,\n **kwargs,\n) -> list[plt.Axes] | plt.Axes:\n \"\"\"\n Nice plots of 2D distribution of boutons as a heatmap per animal.\n\n Parameters\n ----------\n df : pandas.DataFrame\n animals : list-like of str\n List of animals.\n x, y : str\n Keys in `df`.\n xlabel, ylabel : str\n Labels of x and y axes.\n invertx, inverty : bool, optional\n Wether to inverse the x or y axes. Default is False.\n **kwargs : passed to seaborn.histplot()\n\n Returns\n -------\n ax : Axes or list of Axes\n Handle to axes.\n\n \"\"\"\n\n # 2D distribution, per animal\n _, axs = plt.subplots(len(animals), 1, sharex=\"all\")\n\n for animal, ax in zip(animals, axs):\n ax = sns.histplot(\n df[df[\"animal\"] == animal],\n x=x,\n y=y,\n ax=ax,\n **kwargs,\n )\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(animal)\n\n if inverty:\n ax.invert_yaxis()\n\n if invertx:\n axs[-1].invert_xaxis() # only once since all x axes are shared\n\n return axs\n
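A usage sketch with toy coordinates (in practice, one row per detection with animal and Atlas_* columns, as produced by the process functions):

import numpy as np
import pandas as pd
import cuisto

rng = np.random.default_rng(0)
df_coordinates = pd.DataFrame(
    {
        "animal": ["animal0"] * 500 + ["animal1"] * 500,
        "Atlas_X": rng.uniform(-8.0, 0.0, 1000),  # rostro-caudal, mm
        "Atlas_Y": rng.uniform(0.0, 8.0, 1000),   # dorso-ventral, mm
    }
)
axs = cuisto.display.nice_heatmap(
    df_coordinates,
    ("animal0", "animal1"),
    x="Atlas_X",
    y="Atlas_Y",
    xlabel="Rostro-caudal (mm)",
    ylabel="Dorso-ventral (mm)",
    invertx=True,
    inverty=True,
    cmap="OrRd",
)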
"},{"location":"api-display.html#cuisto.display.nice_joint_plot","title":"nice_joint_plot(df, x='', y='', xlabel='', ylabel='', invertx=False, inverty=False, outline_kws={}, ax=None, **kwargs)
","text":"Joint distribution.
Used to display a 2D heatmap of objects. This is more qualitative than quantitative, for display purposes.
Parameters:
Name Type Description Defaultdf
DataFrame
required x
str
Keys in df
.
''
y
str
Keys in df
.
''
xlabel
str
Label of x and y axes.
''
ylabel
str
Label of x and y axes.
''
invertx
bool
Whether to invert the x or y axes. Default is False for both.
False
inverty
bool
Whether to invert the x or y axes. Default is False for both.
False
outline_kws
dict
Passed to draw_structure_outline().
{}
ax
Axes or None
Axes to plot in. If None, draws in current axes (default).
None
**kwargs
Passed to seaborn.histplot.
{}
Returns:
Name Type Descriptionax
Axes
Source code in cuisto/display.py
def nice_joint_plot(\n df: pd.DataFrame,\n x: str = \"\",\n y: str = \"\",\n xlabel: str = \"\",\n ylabel: str = \"\",\n invertx: bool = False,\n inverty: bool = False,\n outline_kws: dict = {},\n ax: plt.Axes | None = None,\n **kwargs,\n) -> plt.Figure:\n \"\"\"\n Joint distribution.\n\n Used to display a 2D heatmap of objects. This is more qualitative than quantitative,\n for display purposes.\n\n Parameters\n ----------\n df : pandas.DataFrame\n x, y : str\n Keys in `df`.\n xlabel, ylabel : str\n Label of x and y axes.\n invertx, inverty : bool, optional\n Whether to inverse the x or y axes. Default is False for both.\n outline_kws : dict\n Passed to draw_structure_outline().\n ax : plt.Axes or None, optional\n Axes to plot in. If None, draws in current axes (default).\n **kwargs\n Passed to seaborn.histplot.\n\n Returns\n -------\n ax : plt.Axes\n\n \"\"\"\n if not ax:\n ax = plt.gca()\n\n # plot outline\n draw_structure_outline(ax=ax, **outline_kws)\n\n # plot joint distribution\n sns.histplot(\n df,\n x=x,\n y=y,\n ax=ax,\n **kwargs,\n )\n\n # adjust axes\n if invertx:\n ax.invert_xaxis()\n if inverty:\n ax.invert_yaxis()\n\n # labels\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n return ax\n
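A usage sketch reusing the hypothetical df_coordinates above; outline_kws needs a real outlines HDF5 file to draw from:

import matplotlib.pyplot as plt
import cuisto

fig, ax = plt.subplots()
ax = cuisto.display.nice_joint_plot(
    df_coordinates,
    x="Atlas_X",
    y="Atlas_Y",
    xlabel="Rostro-caudal (mm)",
    ylabel="Dorso-ventral (mm)",
    outline_kws=dict(
        view="sagittal",
        structures=["root"],
        outline_file="/path/to/outlines.h5",  # hypothetical
        color="k",
    ),
    ax=ax,
    bins=50,
    cmap="OrRd",
)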
"},{"location":"api-display.html#cuisto.display.plot_1D_distributions","title":"plot_1D_distributions(dfs_distributions, cfg, df_coordinates=None)
","text":"Wraps nice_distribution_plot().
Source code incuisto/display.py
def plot_1D_distributions(\n dfs_distributions: list[pd.DataFrame],\n cfg,\n df_coordinates: pd.DataFrame = None,\n):\n \"\"\"\n Wraps nice_distribution_plot().\n \"\"\"\n # prepare figures\n fig, axs_dist = plt.subplots(1, 3, sharey=True, figsize=(13, 6))\n xlabels = [\n \"Rostro-caudal position (mm)\",\n \"Dorso-ventral position (mm)\",\n \"Medio-lateral position (mm)\",\n ]\n\n # get animals\n animals = []\n for df in dfs_distributions:\n animals.extend(df[\"animal\"].unique())\n animals = set(animals)\n\n # get injection sites\n if cfg.distributions[\"display\"][\"show_injection\"]:\n injection_sites = cfg.get_injection_sites(animals)\n else:\n injection_sites = {k: {} for k in range(3)}\n\n # get color palette based on hue\n hue = cfg.distributions[\"hue\"]\n palette = cfg.get_hue_palette(\"distributions\")\n\n # loop through each axis\n for df_dist, ax_dist, xlabel, inj_sites in zip(\n dfs_distributions, axs_dist, xlabels, injection_sites.values()\n ):\n # select data\n if cfg.distributions[\"hue\"] == \"hemisphere\":\n dfplt = df_dist[df_dist[\"hemisphere\"] != \"both\"]\n elif cfg.distributions[\"hue\"] == \"channel\":\n dfplt = df_dist[df_dist[\"channel\"] != \"all\"]\n\n # plot\n ax_dist = nice_distribution_plot(\n dfplt,\n x=\"bins\",\n y=\"distribution\",\n hue=hue,\n xlabel=xlabel,\n ylabel=\"normalized distribution\",\n injections_sites=inj_sites,\n channel_colors=cfg.channels[\"colors\"],\n channel_names=cfg.channels[\"names\"],\n linewidth=2,\n palette=palette,\n ax=ax_dist,\n )\n\n # add data coverage\n if (\"Atlas_AP\" in df_dist[\"axis\"].unique()) & (df_coordinates is not None):\n df_coverage = utils.get_data_coverage(df_coordinates)\n ax_dist = add_data_coverage(df_coverage, ax_dist, edgecolor=None, alpha=0.5)\n ax_dist.legend()\n else:\n ax_dist.legend().remove()\n\n # - Distributions, per animal\n if len(animals) > 1:\n _, axs_dist = plt.subplots(1, 3, sharey=True)\n\n # loop through each axis\n for df_dist, ax_dist, xlabel, inj_sites in zip(\n dfs_distributions, axs_dist, xlabels, injection_sites.values()\n ):\n # select data\n df_dist_plot = df_dist[df_dist[\"hemisphere\"] == \"both\"]\n\n # plot\n ax_dist = nice_distribution_plot(\n df_dist_plot,\n x=\"bins\",\n y=\"distribution\",\n hue=\"animal\",\n xlabel=xlabel,\n ylabel=\"normalized distribution\",\n injections_sites=inj_sites,\n channel_colors=cfg.channels[\"colors\"],\n channel_names=cfg.channels[\"names\"],\n linewidth=2,\n ax=ax_dist,\n )\n\n return fig\n
"},{"location":"api-display.html#cuisto.display.plot_2D_distributions","title":"plot_2D_distributions(df, cfg)
","text":"Wraps nice_joint_plot().
Source code incuisto/display.py
def plot_2D_distributions(df: pd.DataFrame, cfg):\n \"\"\"\n Wraps nice_joint_plot().\n \"\"\"\n # -- 2D heatmap, all animals pooled\n # prepare figure\n fig_heatmap = plt.figure(figsize=(12, 9))\n\n ax_sag = fig_heatmap.add_subplot(2, 2, 1)\n ax_cor = fig_heatmap.add_subplot(2, 2, 2, sharey=ax_sag)\n ax_top = fig_heatmap.add_subplot(2, 2, 3, sharex=ax_sag)\n ax_cbar = fig_heatmap.add_subplot(2, 2, 4, box_aspect=15)\n\n # prepare options\n map_options = dict(\n bins=cfg.distributions[\"display\"][\"cmap_nbins\"],\n cmap=cfg.distributions[\"display\"][\"cmap\"],\n rasterized=True,\n thresh=10,\n stat=\"count\",\n vmin=cfg.distributions[\"display\"][\"cmap_lim\"][0],\n vmax=cfg.distributions[\"display\"][\"cmap_lim\"][1],\n )\n outline_kws = dict(\n structures=cfg.atlas[\"outline_structures\"],\n outline_file=cfg.files[\"outlines\"],\n linewidth=1.5,\n color=\"k\",\n )\n cbar_kws = dict(label=\"count\")\n\n # determine which axes are going to be inverted\n if cfg.atlas[\"type\"] == \"brain\":\n cor_invertx = True\n cor_inverty = False\n top_invertx = True\n top_inverty = False\n elif cfg.atlas[\"type\"] == \"cord\":\n cor_invertx = False\n cor_inverty = False\n top_invertx = True\n top_inverty = True\n\n # - sagittal\n # no need to invert axes because they are shared with the two other views\n outline_kws[\"view\"] = \"sagittal\"\n nice_joint_plot(\n df,\n x=\"Atlas_X\",\n y=\"Atlas_Y\",\n xlabel=\"Rostro-caudal (mm)\",\n ylabel=\"Dorso-ventral (mm)\",\n outline_kws=outline_kws,\n ax=ax_sag,\n **map_options,\n )\n\n # - coronal\n outline_kws[\"view\"] = \"coronal\"\n nice_joint_plot(\n df,\n x=\"Atlas_Z\",\n y=\"Atlas_Y\",\n xlabel=\"Medio-lateral (mm)\",\n ylabel=\"Dorso-ventral (mm)\",\n invertx=cor_invertx,\n inverty=cor_inverty,\n outline_kws=outline_kws,\n ax=ax_cor,\n **map_options,\n )\n ax_cor.invert_yaxis()\n\n # - top\n outline_kws[\"view\"] = \"top\"\n nice_joint_plot(\n df,\n x=\"Atlas_X\",\n y=\"Atlas_Z\",\n xlabel=\"Rostro-caudal (mm)\",\n ylabel=\"Medio-lateral (mm)\",\n invertx=top_invertx,\n inverty=top_inverty,\n outline_kws=outline_kws,\n ax=ax_top,\n cbar=True,\n cbar_ax=ax_cbar,\n cbar_kws=cbar_kws,\n **map_options,\n )\n fig_heatmap.suptitle(\"sagittal, coronal and top-view projections\")\n\n # -- 2D heatmap per animals\n # get animals\n animals = df[\"animal\"].unique()\n if len(animals) > 1:\n # Rostro-caudal, dorso-ventral (sagittal)\n _ = nice_heatmap(\n df,\n animals,\n x=\"Atlas_X\",\n y=\"Atlas_Y\",\n xlabel=\"Rostro-caudal (mm)\",\n ylabel=\"Dorso-ventral (mm)\",\n invertx=True,\n inverty=True,\n cmap=\"OrRd\",\n rasterized=True,\n cbar=True,\n )\n\n # Medio-lateral, dorso-ventral (coronal)\n _ = nice_heatmap(\n df,\n animals,\n x=\"Atlas_Z\",\n y=\"Atlas_Y\",\n xlabel=\"Medio-lateral (mm)\",\n ylabel=\"Dorso-ventral (mm)\",\n inverty=True,\n invertx=True,\n cmap=\"OrRd\",\n rasterized=True,\n )\n\n return fig_heatmap\n
"},{"location":"api-display.html#cuisto.display.plot_regions","title":"plot_regions(df, cfg, **kwargs)
","text":"Wraps nice_bar_plot().
Source code incuisto/display.py
def plot_regions(df: pd.DataFrame, cfg, **kwargs):\n \"\"\"\n Wraps nice_bar_plot().\n \"\"\"\n # get regions order\n if cfg.regions[\"display\"][\"order\"] == \"ontology\":\n regions_order = [d[\"acronym\"] for d in cfg.bg_atlas.structures_list]\n elif cfg.regions[\"display\"][\"order\"] == \"max\":\n regions_order = \"max\"\n else:\n regions_order = None\n\n # determine metrics to be plotted and color palette based on hue\n metrics = [*cfg.regions[\"display\"][\"metrics\"].keys()]\n hue = cfg.regions[\"hue\"]\n palette = cfg.get_hue_palette(\"regions\")\n\n # select data\n dfplt = utils.select_hemisphere_channel(\n df, hue, cfg.regions[\"hue_filter\"], cfg.regions[\"hue_mirror\"]\n )\n\n # prepare options\n bar_kws = dict(\n err_kws={\"linewidth\": 1.5},\n dodge=cfg.regions[\"display\"][\"dodge\"],\n palette=palette,\n )\n pts_kws = dict(\n size=4,\n edgecolor=\"auto\",\n linewidth=0.75,\n dodge=cfg.regions[\"display\"][\"dodge\"],\n palette=palette,\n )\n # draw\n figs = nice_bar_plot(\n dfplt,\n x=\"Name\",\n y=metrics,\n hue=hue,\n ylabel=[*cfg.regions[\"display\"][\"metrics\"].values()],\n orient=cfg.regions[\"display\"][\"orientation\"],\n nx=cfg.regions[\"display\"][\"nregions\"],\n ordering=regions_order,\n hue_mirror=cfg.regions[\"hue_mirror\"],\n log_scale=cfg.regions[\"display\"][\"log_scale\"],\n bar_kws=bar_kws,\n pts_kws=pts_kws,\n **kwargs,\n )\n\n return figs\n
"},{"location":"api-io.html","title":"cuisto.io","text":"io module, part of cuisto.
Contains loading and saving functions.
"},{"location":"api-io.html#cuisto.io.cat_csv_dir","title":"cat_csv_dir(directory, **kwargs)
","text":"Scans a directory for csv files and concatenate them into a single DataFrame.
Parameters:
Name Type Description Defaultdirectory
str
Path to the directory to scan.
required**kwargs
passed to pandas.read_csv()
{}
Returns:
Name Type Descriptiondf
DataFrame
All CSV files concatenated in a single DataFrame.
Source code incuisto/io.py
def cat_csv_dir(directory, **kwargs) -> pd.DataFrame:\n \"\"\"\n Scans a directory for csv files and concatenate them into a single DataFrame.\n\n Parameters\n ----------\n directory : str\n Path to the directory to scan.\n **kwargs : passed to pandas.read_csv()\n\n Returns\n -------\n df : pandas.DataFrame\n All CSV files concatenated in a single DataFrame.\n\n \"\"\"\n return pd.concat(\n pd.read_csv(\n os.path.join(directory, filename),\n **kwargs,\n )\n for filename in os.listdir(directory)\n if (filename.endswith(\".csv\"))\n and not check_empty_file(os.path.join(directory, filename), threshold=1)\n )\n
"},{"location":"api-io.html#cuisto.io.cat_data_dir","title":"cat_data_dir(directory, segtype, **kwargs)
","text":"Wraps either cat_csv_dir() or cat_json_dir() depending on segtype
.
Parameters:
Name Type Description Defaultdirectory
str
Path to the directory to scan.
requiredsegtype
str
\"synaptophysin\" or \"fibers\".
required**kwargs
passed to cat_csv_dir() or cat_json_dir().
{}
Returns:
Name Type Descriptiondf
DataFrame
All files concatenated in a single DataFrame.
Source code incuisto/io.py
def cat_data_dir(directory: str, segtype: str, **kwargs) -> pd.DataFrame:\n \"\"\"\n Wraps either cat_csv_dir() or cat_json_dir() depending on `segtype`.\n\n Parameters\n ----------\n directory : str\n Path to the directory to scan.\n segtype : str\n \"synaptophysin\" or \"fibers\".\n **kwargs : passed to cat_csv_dir() or cat_json_dir().\n\n Returns\n -------\n df : pd.DataFrame\n All files concatenated in a single DataFrame.\n\n \"\"\"\n if segtype in CSV_KW:\n # remove kwargs for json\n kwargs.pop(\"hemisphere_names\", None)\n kwargs.pop(\"atlas\", None)\n return cat_csv_dir(directory, **kwargs)\n elif segtype in JSON_KW:\n kwargs = {k: kwargs[k] for k in [\"hemisphere_names\", \"atlas\"] if k in kwargs}\n return cat_json_dir(directory, **kwargs)\n else:\n raise ValueError(\n f\"'{segtype}' not supported, unable to determine if CSV or JSON.\"\n )\n
"},{"location":"api-io.html#cuisto.io.cat_json_dir","title":"cat_json_dir(directory, hemisphere_names, atlas)
","text":"Scans a directory for json files and concatenate them in a single DataFrame.
The json files must be generated with \"workflow_import_export.groovy\" from a QuPath project.
Parameters:
Name Type Description Defaultdirectory
str
Path to the directory to scan.
requiredhemisphere_names
dict
Maps between hemisphere names in the json files (\"Right\" and \"Left\") to something else (eg. \"Ipsi.\" and \"Contra.\").
requiredatlas
BrainGlobeAtlas
Atlas to read regions from.
requiredReturns:
Name Type Descriptiondf
DataFrame
All JSON files concatenated in a single DataFrame.
Source code incuisto/io.py
def cat_json_dir(\n directory: str, hemisphere_names: dict, atlas: BrainGlobeAtlas\n) -> pd.DataFrame:\n \"\"\"\n Scans a directory for json files and concatenate them in a single DataFrame.\n\n The json files must be generated with 'workflow_import_export.groovy\" from a QuPath\n project.\n\n Parameters\n ----------\n directory : str\n Path to the directory to scan.\n hemisphere_names : dict\n Maps between hemisphere names in the json files (\"Right\" and \"Left\") to\n something else (eg. \"Ipsi.\" and \"Contra.\").\n atlas : BrainGlobeAtlas\n Atlas to read regions from.\n\n Returns\n -------\n df : pd.DataFrame\n All JSON files concatenated in a single DataFrame.\n\n \"\"\"\n # list files\n files_list = [\n os.path.join(directory, filename)\n for filename in os.listdir(directory)\n if (filename.endswith(\".json\"))\n ]\n\n data = [] # prepare list of DataFrame\n for filename in files_list:\n with open(filename, \"rb\") as fid:\n df = pd.DataFrame.from_dict(\n orjson.loads(fid.read())[\"paths\"], orient=\"index\"\n )\n df[\"Image\"] = os.path.basename(filename).split(\"_detections\")[0]\n data.append(df)\n\n df = (\n pd.concat(data)\n .explode(\n [\"x\", \"y\", \"z\", \"hemisphere\"]\n ) # get an entry for each point of segments\n .reset_index()\n .rename(\n columns=dict(\n x=\"Atlas_X\",\n y=\"Atlas_Y\",\n z=\"Atlas_Z\",\n index=\"Object ID\",\n classification=\"Classification\",\n )\n )\n .set_index(\"Object ID\")\n )\n\n # change hemisphere names\n df[\"hemisphere\"] = df[\"hemisphere\"].map(hemisphere_names)\n\n # add object type\n df[\"Object type\"] = \"Detection\"\n\n # add brain regions\n df = utils.add_brain_region(df, atlas, col=\"Parent\")\n\n return df\n
"},{"location":"api-io.html#cuisto.io.check_empty_file","title":"check_empty_file(filename, threshold=1)
","text":"Checks if a file is empty.
Empty is defined as a file whose number of lines is lower than or equal to threshold
(to allow for headers).
Parameters:
Name Type Description Defaultfilename
str
Full path to the file to check.
requiredthreshold
int
If number of lines is lower than or equal to this value, it is considered as empty. Default is 1.
1
Returns:
Name Type Descriptionempty
bool
True if the file is empty as defined above.
Source code incuisto/io.py
def check_empty_file(filename: str, threshold: int = 1) -> bool:\n \"\"\"\n Checks if a file is empty.\n\n Empty is defined as a file whose number of lines is lower than or equal to\n `threshold` (to allow for headers).\n\n Parameters\n ----------\n filename : str\n Full path to the file to check.\n threshold : int, optional\n If number of lines is lower than or equal to this value, it is considered as\n empty. Default is 1.\n\n Returns\n -------\n empty : bool\n True if the file is empty as defined above.\n\n \"\"\"\n with open(filename, \"rb\") as fid:\n nlines = sum(1 for _ in fid)\n\n if nlines <= threshold:\n return True\n else:\n return False\n
"},{"location":"api-io.html#cuisto.io.get_measurements_directory","title":"get_measurements_directory(wdir, animal, kind, segtype)
","text":"Get the directory with detections or annotations measurements for given animal ID.
Parameters:
Name Type Description Defaultwdir
str
Base working directory.
requiredanimal
str
Animal ID.
requiredkind
str
\"annotation\" or \"detection\".
requiredsegtype
str
Type of segmentation, eg. \"synaptophysin\".
requiredReturns:
Name Type Descriptiondirectory
str
Path to detections or annotations directory.
Source code incuisto/io.py
def get_measurements_directory(wdir, animal: str, kind: str, segtype: str) -> str:\n \"\"\"\n Get the directory with detections or annotations measurements for given animal ID.\n\n Parameters\n ----------\n wdir : str\n Base working directory.\n animal : str\n Animal ID.\n kind : str\n \"annotation\" or \"detection\".\n segtype : str\n Type of segmentation, eg. \"synaptophysin\".\n\n Returns\n -------\n directory : str\n Path to detections or annotations directory.\n\n \"\"\"\n bdir = os.path.join(wdir, animal, animal.lower() + \"_segmentation\", segtype)\n\n if (kind == \"detection\") or (kind == \"detections\"):\n return os.path.join(bdir, \"detections\")\n elif (kind == \"annotation\") or (kind == \"annotations\"):\n return os.path.join(bdir, \"annotations\")\n else:\n raise ValueError(\n f\"kind = '{kind}' not supported. Choose 'detection' or 'annotation'.\"\n )\n
"},{"location":"api-io.html#cuisto.io.load_dfs","title":"load_dfs(filepath, fmt, identifiers=['df_regions', 'df_coordinates', 'df_distribution_ap', 'df_distribution_dv', 'df_distribution_ml'])
","text":"Load DataFrames from file.
If fmt
is \"h5\" (\"xslx\"), identifiers are interpreted as h5 group identifier (sheet name, respectively). If fmt
is \"pickle\", \"csv\" or \"tsv\", identifiers are appended to filename
. Path to the file can't have a dot (\".\") in it.
Parameters:
Name Type Description Defaultfilepath
str
Full path to the file(s), without extension.
requiredfmt
(h5, csv, pickle, xlsx)
File(s) format.
\"h5\"
identifiers
list of str
List of identifiers to load from files. Defaults to the ones saved in cuisto.process.process_animals().
['df_regions', 'df_coordinates', 'df_distribution_ap', 'df_distribution_dv', 'df_distribution_ml']
Returns:
Type DescriptionAll requested DataFrames.
Source code in cuisto/io.py
def load_dfs(\n filepath: str,\n fmt: str,\n identifiers: list[str] = [\n \"df_regions\",\n \"df_coordinates\",\n \"df_distribution_ap\",\n \"df_distribution_dv\",\n \"df_distribution_ml\",\n ],\n):\n \"\"\"\n Load DataFrames from file.\n\n If `fmt` is \"h5\" (\"xslx\"), identifiers are interpreted as h5 group identifier (sheet\n name, respectively).\n If `fmt` is \"pickle\", \"csv\" or \"tsv\", identifiers are appended to `filename`.\n Path to the file can't have a dot (\".\") in it.\n\n Parameters\n ----------\n filepath : str\n Full path to the file(s), without extension.\n fmt : {\"h5\", \"csv\", \"pickle\", \"xlsx\"}\n File(s) format.\n identifiers : list of str, optional\n List of identifiers to load from files. Defaults to the ones saved in\n cuisto.process.process_animals().\n\n Returns\n -------\n All requested DataFrames.\n\n \"\"\"\n # ensure filename without extension\n base_path = os.path.splitext(filepath)[0]\n full_path = base_path + \".\" + fmt\n\n res = []\n if (fmt == \"h5\") or (fmt == \"hdf\") or (fmt == \"hdf5\"):\n for identifier in identifiers:\n res.append(pd.read_hdf(full_path, identifier))\n elif fmt == \"xlsx\":\n for identifier in identifiers:\n res.append(pd.read_excel(full_path, sheet_name=identifier))\n else:\n for identifier in identifiers:\n id_path = f\"{base_path}_{identifier}.{fmt}\"\n if (fmt == \"pickle\") or (fmt == \"pkl\"):\n res.append(pd.read_pickle(id_path))\n elif fmt == \"csv\":\n res.append(pd.read_csv(id_path))\n elif fmt == \"tsv\":\n res.append(pd.read_csv(id_path, sep=\"\\t\"))\n else:\n raise ValueError(f\"{fmt} is not supported.\")\n\n return res\n
"},{"location":"api-io.html#cuisto.io.save_dfs","title":"save_dfs(out_dir, filename, dfs)
","text":"Save DataFrames to file.
File format is inferred from file name extension.
Parameters:
Name Type Description Defaultout_dir
str
Output directory.
requiredfilename
_type_
File name.
requireddfs
dict
DataFrames to save, as {identifier: df}. If HDF5 or xlsx, all df are saved in the same file, otherwise identifier is appended to the file name.
required Source code incuisto/io.py
def save_dfs(out_dir: str, filename, dfs: dict):\n \"\"\"\n Save DataFrames to file.\n\n File format is inferred from file name extension.\n\n Parameters\n ----------\n out_dir : str\n Output directory.\n filename : _type_\n File name.\n dfs : dict\n DataFrames to save, as {identifier: df}. If HDF5 or xlsx, all df are saved in\n the same file, otherwise identifier is appended to the file name.\n\n \"\"\"\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n basename, ext = os.path.splitext(filename)\n if ext in [\".h5\", \".hdf\", \".hdf5\"]:\n path = os.path.join(out_dir, filename)\n for identifier, df in dfs.items():\n df.to_hdf(path, key=identifier)\n elif ext == \".xlsx\":\n for identifier, df in dfs.items():\n df.to_excel(path, sheet_name=identifier)\n else:\n for identifier, df in dfs.items():\n path = os.path.join(out_dir, f\"{basename}_{identifier}{ext}\")\n if ext in [\".pickle\", \".pkl\"]:\n df.to_pickle(path)\n elif ext == \".csv\":\n df.to_csv(path)\n elif ext == \".tsv\":\n df.to_csv(path, sep=\"\\t\")\n else:\n raise ValueError(f\"{filename} has an unsupported extension.\")\n
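A round-trip sketch (hypothetical directory and DataFrames): save_dfs() infers the format from the file name extension, while load_dfs() takes the path without extension:

import cuisto

cuisto.io.save_dfs(
    "/path/to/quantification",  # output directory (hypothetical)
    "results.h5",
    dict(df_regions=df_regions, df_coordinates=df_coordinates),
)
df_regions, df_coordinates = cuisto.io.load_dfs(
    "/path/to/quantification/results",  # no extension, no dot in the path
    "h5",
    identifiers=["df_regions", "df_coordinates"],
)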
"},{"location":"api-process.html","title":"cuisto.process","text":"process module, part of cuisto.
Wraps other functions for a click&play behaviour. Relies on the configuration file.
"},{"location":"api-process.html#cuisto.process.process_animal","title":"process_animal(animal, df_annotations, df_detections, cfg, compute_distributions=True)
","text":"Quantify objects for one animal.
Fetch required files, compute objects' distributions in brain regions and spatial distributions, and gather Atlas coordinates.
Parameters:
Name Type Description Defaultanimal
str
Animal ID.
requireddf_annotations
DataFrame
DataFrames of QuPath Annotations and Detections.
requireddf_detections
DataFrame
DataFrames of QuPath Annotations and Detections.
requiredcfg
Config
The configuration loaded from TOML configuration file.
requiredcompute_distributions
bool
If False, do not compute the 1D distributions and return an empty list. Default is True.
True
Returns:
Name Type Descriptiondf_regions
DataFrame
Metrics in brain regions. One entry for each hemisphere of each brain regions.
df_distribution
list of pandas.DataFrame
Rostro-caudal distribution, as raw count and probability density function, in each axis.
df_coordinates
DataFrame
Atlas coordinates of each points.
Source code incuisto/process.py
def process_animal(\n animal: str,\n df_annotations: pd.DataFrame,\n df_detections: pd.DataFrame,\n cfg,\n compute_distributions: bool = True,\n) -> tuple[pd.DataFrame, list[pd.DataFrame], pd.DataFrame]:\n \"\"\"\n Quantify objects for one animal.\n\n Fetch required files and compute objects' distributions in brain regions, spatial\n distributions and gather Atlas coordinates.\n\n Parameters\n ----------\n animal : str\n Animal ID.\n df_annotations, df_detections : pd.DataFrame\n DataFrames of QuPath Annotations and Detections.\n cfg : cuisto.Config\n The configuration loaded from TOML configuration file.\n compute_distributions : bool, optional\n If False, do not compute the 1D distributions and return an empty list.Default\n is True.\n\n Returns\n -------\n df_regions : pandas.DataFrame\n Metrics in brain regions. One entry for each hemisphere of each brain regions.\n df_distribution : list of pandas.DataFrame\n Rostro-caudal distribution, as raw count and probability density function, in\n each axis.\n df_coordinates : pandas.DataFrame\n Atlas coordinates of each points.\n\n \"\"\"\n # - Annotations data cleanup\n # filter regions\n df_annotations = utils.filter_df_regions(\n df_annotations, [\"Root\", \"root\"], mode=\"remove\", col=\"Name\"\n )\n df_annotations = utils.filter_df_regions(\n df_annotations, cfg.atlas[\"blacklist\"], mode=\"remove\", col=\"Name\"\n )\n # add hemisphere\n df_annotations = utils.add_hemisphere(df_annotations, cfg.hemispheres[\"names\"])\n # remove objects in non-leaf regions\n df_annotations = utils.filter_df_regions(\n df_annotations, cfg.atlas[\"leaveslist\"], mode=\"keep\", col=\"Name\"\n )\n # merge regions\n df_annotations = utils.merge_regions(\n df_annotations, col=\"Name\", fusion_file=cfg.files[\"fusion\"]\n )\n if compute_distributions:\n # - Detections data cleanup\n # remove objects not in selected classifications\n df_detections = utils.filter_df_classifications(\n df_detections, cfg.object_type, mode=\"keep\", col=\"Classification\"\n )\n # remove objects from blacklisted regions and \"Root\"\n df_detections = utils.filter_df_regions(\n df_detections, cfg.atlas[\"blacklist\"], mode=\"remove\", col=\"Parent\"\n )\n # add hemisphere\n df_detections = utils.add_hemisphere(\n df_detections,\n cfg.hemispheres[\"names\"],\n cfg.atlas[\"midline\"],\n col=\"Atlas_Z\",\n atlas_type=cfg.atlas[\"type\"],\n )\n # add detection channel\n df_detections = utils.add_channel(\n df_detections, cfg.object_type, cfg.channels[\"names\"]\n )\n # convert coordinates to mm\n df_detections[[\"Atlas_X\", \"Atlas_Y\", \"Atlas_Z\"]] = df_detections[\n [\"Atlas_X\", \"Atlas_Y\", \"Atlas_Z\"]\n ].divide(1000)\n # convert to sterotaxic coordinates\n if cfg.distributions[\"stereo\"]:\n (\n df_detections[\"Atlas_AP\"],\n df_detections[\"Atlas_DV\"],\n df_detections[\"Atlas_ML\"],\n ) = utils.ccf_to_stereo(\n df_detections[\"Atlas_X\"],\n df_detections[\"Atlas_Y\"],\n df_detections[\"Atlas_Z\"],\n )\n else:\n (\n df_detections[\"Atlas_AP\"],\n df_detections[\"Atlas_DV\"],\n df_detections[\"Atlas_ML\"],\n ) = (\n df_detections[\"Atlas_X\"],\n df_detections[\"Atlas_Y\"],\n df_detections[\"Atlas_Z\"],\n )\n\n # - Computations\n # get regions distributions\n df_regions = compute.get_regions_metrics(\n df_annotations,\n cfg.object_type,\n cfg.channels[\"names\"],\n cfg.regions[\"base_measurement\"],\n cfg.regions[\"metrics\"],\n )\n colstonorm = [v for v in cfg.regions[\"metrics\"].values() if \"relative\" not in v]\n\n # normalize by starter cells\n if 
cfg.regions[\"normalize_starter_cells\"]:\n df_regions = compute.normalize_starter_cells(\n df_regions, colstonorm, animal, cfg.files[\"infos\"], cfg.channels[\"names\"]\n )\n\n # get AP, DV, ML distributions in stereotaxic coordinates\n if compute_distributions:\n dfs_distributions = [\n compute.get_distribution(\n df_detections,\n axis,\n cfg.distributions[\"hue\"],\n cfg.distributions[\"hue_filter\"],\n cfg.distributions[\"common_norm\"],\n stereo_lim,\n nbins=nbins,\n )\n for axis, stereo_lim, nbins in zip(\n [\"Atlas_AP\", \"Atlas_DV\", \"Atlas_ML\"],\n [\n cfg.distributions[\"ap_lim\"],\n cfg.distributions[\"dv_lim\"],\n cfg.distributions[\"ml_lim\"],\n ],\n [\n cfg.distributions[\"ap_nbins\"],\n cfg.distributions[\"dv_nbins\"],\n cfg.distributions[\"dv_nbins\"],\n ],\n )\n ]\n else:\n dfs_distributions = []\n\n # add animal tag to each DataFrame\n df_detections[\"animal\"] = animal\n df_regions[\"animal\"] = animal\n for df in dfs_distributions:\n df[\"animal\"] = animal\n\n return df_regions, dfs_distributions, df_detections\n
"},{"location":"api-process.html#cuisto.process.process_animals","title":"process_animals(wdir, animals, cfg, out_fmt=None, compute_distributions=True)
","text":"Get data from all animals and plot.
Parameters:
Name Type Description Defaultwdir
str
Base working directory, containing animals
folders.
animals
list-like of str
List of animal IDs.
requiredcfg
Configuration object.
requiredout_fmt
(None, h5, csv, tsv, xlsx, pickle)
Output file(s) format, if None, nothing is saved (default).
None
compute_distributions
bool
If False, do not compute the 1D distributions and return an empty list. Default is True.
True
Returns:
Name Type Descriptiondf_regions
DataFrame
Metrics in brain regions. One entry for each hemisphere of each brain regions.
df_distribution
list of pandas.DataFrame
Rostro-caudal distribution, as raw count and probability density function, in each axis.
df_coordinates
DataFrame
Atlas coordinates of each points.
Source code incuisto/process.py
def process_animals(\n wdir: str,\n animals: list[str] | tuple[str],\n cfg,\n out_fmt: str | None = None,\n compute_distributions: bool = True,\n) -> tuple[pd.DataFrame]:\n \"\"\"\n Get data from all animals and plot.\n\n Parameters\n ----------\n wdir : str\n Base working directory, containing `animals` folders.\n animals : list-like of str\n List of animals ID.\n cfg: cuisto.Config\n Configuration object.\n out_fmt : {None, \"h5\", \"csv\", \"tsv\", \"xslx\", \"pickle\"}\n Output file(s) format, if None, nothing is saved (default).\n compute_distributions : bool, optional\n If False, do not compute the 1D distributions and return an empty list.Default\n is True.\n\n\n Returns\n -------\n df_regions : pandas.DataFrame\n Metrics in brain regions. One entry for each hemisphere of each brain regions.\n df_distribution : list of pandas.DataFrame\n Rostro-caudal distribution, as raw count and probability density function, in\n each axis.\n df_coordinates : pandas.DataFrame\n Atlas coordinates of each points.\n\n \"\"\"\n\n # -- Preparation\n df_regions = []\n dfs_distributions = []\n df_coordinates = []\n\n # -- Processing\n pbar = tqdm(animals)\n\n for animal in pbar:\n pbar.set_description(f\"Processing {animal}\")\n\n # combine all detections and annotations from this animal\n df_annotations = io.cat_csv_dir(\n io.get_measurements_directory(\n wdir, animal, \"annotation\", cfg.segmentation_tag\n ),\n index_col=\"Object ID\",\n sep=\"\\t\",\n )\n if compute_distributions:\n df_detections = io.cat_data_dir(\n io.get_measurements_directory(\n wdir, animal, \"detection\", cfg.segmentation_tag\n ),\n cfg.segmentation_tag,\n index_col=\"Object ID\",\n sep=\"\\t\",\n hemisphere_names=cfg.hemispheres[\"names\"],\n atlas=cfg.bg_atlas,\n )\n else:\n df_detections = pd.DataFrame()\n\n # get results\n df_reg, dfs_dis, df_coo = process_animal(\n animal,\n df_annotations,\n df_detections,\n cfg,\n compute_distributions=compute_distributions,\n )\n\n # collect results\n df_regions.append(df_reg)\n dfs_distributions.append(dfs_dis)\n df_coordinates.append(df_coo)\n\n # concatenate all results\n df_regions = pd.concat(df_regions, ignore_index=True)\n dfs_distributions = [\n pd.concat(dfs_list, ignore_index=True) for dfs_list in zip(*dfs_distributions)\n ]\n df_coordinates = pd.concat(df_coordinates, ignore_index=True)\n\n # -- Saving\n if out_fmt:\n outdir = os.path.join(wdir, \"quantification\")\n outfile = f\"{cfg.object_type.lower()}_{cfg.atlas[\"type\"]}_{'-'.join(animals)}.{out_fmt}\"\n dfs = dict(\n df_regions=df_regions,\n df_coordinates=df_coordinates,\n df_distribution_ap=dfs_distributions[0],\n df_distribution_dv=dfs_distributions[1],\n df_distribution_ml=dfs_distributions[2],\n )\n io.save_dfs(outdir, outfile, dfs)\n\n return df_regions, dfs_distributions, df_coordinates\n
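A typical end-to-end sketch (hypothetical paths and animal IDs), chaining the processing and display wrappers documented on these pages:

import matplotlib.pyplot as plt
import cuisto

cfg = cuisto.Config("/path/to/config.toml")  # hypothetical configuration file
wdir = "/path/to/working_directory"          # contains one folder per animal
animals = ("animal0", "animal1")             # hypothetical animal IDs

df_regions, dfs_distributions, df_coordinates = cuisto.process.process_animals(
    wdir, animals, cfg, out_fmt="h5"
)

figs = cuisto.display.plot_regions(df_regions, cfg)
fig_dist = cuisto.display.plot_1D_distributions(
    dfs_distributions, cfg, df_coordinates=df_coordinates
)
fig_heatmap = cuisto.display.plot_2D_distributions(df_coordinates, cfg)
plt.show()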
"},{"location":"api-script-qupath-script-runner.html","title":"qupath_script_runner","text":"Template to show how to run groovy script with QuPath, multi-threaded.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.EXCLUDE_LIST","title":"EXCLUDE_LIST = []
module-attribute
","text":"Images names to NOT run the script on.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.NTHREADS","title":"NTHREADS = 5
module-attribute
","text":"Number of threads to use.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.QPROJ_PATH","title":"QPROJ_PATH = '/path/to/qupath/project.qproj'
module-attribute
","text":"Full path to the QuPath project.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.QUIET","title":"QUIET = True
module-attribute
","text":"Use QuPath in quiet mode, eg. with minimal verbosity.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.QUPATH_EXE","title":"QUPATH_EXE = '/path/to/the/qupath/QuPath-0.5.1 (console).exe'
module-attribute
","text":"Path to the QuPath executable (console mode).
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.SAVE","title":"SAVE = True
module-attribute
","text":"Whether to save the project after the script ran on an image.
"},{"location":"api-script-qupath-script-runner.html#scripts.qupath_script_template.SCRIPT_PATH","title":"SCRIPT_PATH = '/path/to/the/script.groovy'
module-attribute
","text":"Path to the groovy script.
"},{"location":"api-script-segment.html","title":"segment_images","text":"Script to segment objects from images.
For fiber-like objects, binarize and skeletonize the image, then use skan
to extract branch coordinates. For polygon-like objects, binarize the image, detect objects and extract their contour coordinates. For points, treat them as polygons, then extract the centroids instead of the contours. Finally, export the coordinates as collections in geojson files, importable in QuPath. Supports any number of channels of interest within the same image. One output file per channel will be created.
This script uses cuisto.seg
. It is designed to work on probability maps generated from a pixel classifier in QuPath, but might work on raw images.
Usage : fill in the Parameters section of the script and run it. A \"geojson\" folder will be created in the parent directory of IMAGES_DIR
. To exclude objects near the edges of an ROI, specify the path to masks stored as images with the same names as probabilities images (without their suffix).
author : Guillaume Le Goc (g.legoc@posteo.org) @ NeuroPSI version : 2024.12.10
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.CHANNELS_PARAMS","title":"CHANNELS_PARAMS = [{'name': 'cy5', 'target_channel': 0, 'proba_threshold': 0.85, 'qp_class': 'Fibers: Cy5', 'qp_color': [164, 250, 120]}, {'name': 'dsred', 'target_channel': 1, 'proba_threshold': 0.65, 'qp_class': 'Fibers: DsRed', 'qp_color': [224, 153, 18]}, {'name': 'egfp', 'target_channel': 2, 'proba_threshold': 0.85, 'qp_class': 'Fibers: EGFP', 'qp_color': [135, 11, 191]}]
module-attribute
","text":"This should be a list of dictionary (one per channel) with keys :
EDGE_DIST = 0
module-attribute
","text":"Distance to brain edge to ignore, in \u00b5m. 0 to disable.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.FILTERS","title":"FILTERS = {'length_low': 1.5, 'area_low': 10, 'area_high': 1000, 'ecc_low': 0.0, 'ecc_high': 0.9, 'dist_thresh': 30}
module-attribute
","text":"Dictionary with keys :
IMAGES_DIR = '/path/to/images'
module-attribute
","text":"Full path to the images to segment.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.IMG_SUFFIX","title":"IMG_SUFFIX = '_Probabilities.tiff'
module-attribute
","text":"Images suffix, including extension. Masks must be the same name without the suffix.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.MASKS_DIR","title":"MASKS_DIR = 'path/to/corresponding/masks'
module-attribute
","text":"Full path to the masks, to exclude objects near the brain edges (set to None or empty string to disable this feature).
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.MASKS_EXT","title":"MASKS_EXT = 'tiff'
module-attribute
","text":"Masks files extension.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.MAX_PIX_VALUE","title":"MAX_PIX_VALUE = 255
module-attribute
","text":"Maximum pixel possible value to adjust proba_threshold
.
ORIGINAL_PIXELSIZE = 0.45
module-attribute
","text":"Original images pixel size in microns. This is in case the pixel classifier uses a lower resolution, yielding smaller probability maps, so output objects coordinates need to be rescaled to the full size images. The pixel size is written in the \"Image\" tab in QuPath.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.QUPATH_TYPE","title":"QUPATH_TYPE = 'detection'
module-attribute
","text":"QuPath object type.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.SEGTYPE","title":"SEGTYPE = 'boutons'
module-attribute
","text":"Type of segmentation.
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.get_geojson_dir","title":"get_geojson_dir(images_dir)
","text":"Get the directory of geojson files, which will be in the parent directory of images_dir
.
If the directory does not exist, create it.
Parameters:
Name Type Description Defaultimages_dir
str
required Returns:
Name Type Descriptiongeojson_dir
str
Source code in scripts/segmentation/segment_images.py
def get_geojson_dir(images_dir: str):\n \"\"\"\n Get the directory of geojson files, which will be in the parent directory\n of `images_dir`.\n\n If the directory does not exist, create it.\n\n Parameters\n ----------\n images_dir : str\n\n Returns\n -------\n geojson_dir : str\n\n \"\"\"\n\n geojson_dir = os.path.join(Path(images_dir).parent, \"geojson\")\n\n if not os.path.isdir(geojson_dir):\n os.mkdir(geojson_dir)\n\n return geojson_dir\n
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.get_geojson_properties","title":"get_geojson_properties(name, color, objtype='detection')
","text":"Return geojson objects properties as a dictionnary, ready to be used in geojson.Feature.
Parameters:
Name Type Description Defaultname
str
Classification name.
requiredcolor
tuple or list
Classification color in RGB (3-elements vector).
requiredobjtype
str
Object type (\"detection\" or \"annotation\"). Default is \"detection\".
'detection'
Returns:
Name Type Descriptionprops
dict
Source code in scripts/segmentation/segment_images.py
def get_geojson_properties(name: str, color: tuple | list, objtype: str = \"detection\"):\n \"\"\"\n Return geojson objects properties as a dictionnary, ready to be used in geojson.Feature.\n\n Parameters\n ----------\n name : str\n Classification name.\n color : tuple or list\n Classification color in RGB (3-elements vector).\n objtype : str, optional\n Object type (\"detection\" or \"annotation\"). Default is \"detection\".\n\n Returns\n -------\n props : dict\n\n \"\"\"\n\n return {\n \"objectType\": objtype,\n \"classification\": {\"name\": name, \"color\": color},\n \"isLocked\": \"true\",\n }\n
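For example, with the EGFP channel defined in CHANNELS_PARAMS above:

props = get_geojson_properties("Fibers: EGFP", [135, 11, 191])
# props == {"objectType": "detection",
#           "classification": {"name": "Fibers: EGFP", "color": [135, 11, 191]},
#           "isLocked": "true"}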
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.get_seg_method","title":"get_seg_method(segtype)
","text":"Determine what kind of segmentation is performed.
Segmentation kinds are, for now, lines, polygons or points. This is detected based on hardcoded keywords.
Parameters:
Name Type Description Defaultsegtype
str
required Returns:
Name Type Descriptionseg_method
str
Source code in scripts/segmentation/segment_images.py
def get_seg_method(segtype: str):\n \"\"\"\n Determine what kind of segmentation is performed.\n\n Segmentation kind are, for now, lines, polygons or points. We detect that based on\n hardcoded keywords.\n\n Parameters\n ----------\n segtype : str\n\n Returns\n -------\n seg_method : str\n\n \"\"\"\n\n line_list = [\"fibers\", \"axons\", \"fiber\", \"axon\"]\n point_list = [\"synapto\", \"synaptophysin\", \"syngfp\", \"boutons\", \"points\"]\n polygon_list = [\"cells\", \"polygon\", \"polygons\", \"polygon\", \"cell\"]\n\n if segtype in line_list:\n seg_method = \"lines\"\n elif segtype in polygon_list:\n seg_method = \"polygons\"\n elif segtype in point_list:\n seg_method = \"points\"\n else:\n raise ValueError(\n f\"Could not determine method to use based on segtype : {segtype}.\"\n )\n\n return seg_method\n
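A few example calls, following the hardcoded keyword lists:

get_seg_method("axons")    # -> "lines"
get_seg_method("boutons")  # -> "points"
get_seg_method("cells")    # -> "polygons"
get_seg_method("nuclei")   # raises ValueError (keyword not registered)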
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.parameters_as_dict","title":"parameters_as_dict(images_dir, masks_dir, segtype, name, proba_threshold, edge_dist)
","text":"Get information as a dictionnary.
Parameters:
Name Type Description Defaultimages_dir
str
Path to images to be segmented.
requiredmasks_dir
str
Path to images masks.
requiredsegtype
str
Segmentation type (eg. \"fibers\").
requiredname
str
Name of the segmentation (eg. \"green\").
requiredproba_threshold
float < 1
Probability threshold.
requirededge_dist
float
Distance in \u00b5m to the brain edge that is ignored.
requiredReturns:
Name Type Descriptionparams
dict
Source code in scripts/segmentation/segment_images.py
def parameters_as_dict(\n images_dir: str,\n masks_dir: str,\n segtype: str,\n name: str,\n proba_threshold: float,\n edge_dist: float,\n):\n \"\"\"\n Get information as a dictionnary.\n\n Parameters\n ----------\n images_dir : str\n Path to images to be segmented.\n masks_dir : str\n Path to images masks.\n segtype : str\n Segmentation type (eg. \"fibers\").\n name : str\n Name of the segmentation (eg. \"green\").\n proba_threshold : float < 1\n Probability threshold.\n edge_dist : float\n Distance in \u00b5m to the brain edge that is ignored.\n\n Returns\n -------\n params : dict\n\n \"\"\"\n\n return {\n \"images_location\": images_dir,\n \"masks_location\": masks_dir,\n \"type\": segtype,\n \"probability threshold\": proba_threshold,\n \"name\": name,\n \"edge distance\": edge_dist,\n }\n
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.process_directory","title":"process_directory(images_dir, img_suffix='', segtype='', original_pixelsize=1.0, target_channel=0, proba_threshold=0.0, qupath_class='Object', qupath_color=[0, 0, 0], channel_suffix='', edge_dist=0.0, filters={}, masks_dir='', masks_ext='')
","text":"Main function, processes the .ome.tiff files in the input directory.
Parameters:
Name Type Description Defaultimages_dir
str
Path to the images to be segmented.
requiredimg_suffix
str
Images suffix, including extension.
''
segtype
str
Segmentation type.
''
original_pixelsize
float
Original images pixel size in microns.
1.0
target_channel
int
Index of the channel containing the objects of interest (eg. not the background), in the probability map (not the original image channels).
0
proba_threshold
float < 1
Probability below this value will be discarded (multiplied by MAX_PIX_VALUE).
0.0
qupath_class
str
Name of the QuPath classification.
'Object'
qupath_color
list of three elements
Color associated to that classification in RGB.
[0, 0, 0]
channel_suffix
str
Channel name, will be used as a suffix in output geojson files.
''
edge_dist
float
Distance to the edge of the brain masks that will be ignored, in microns. Set to 0 to disable this feature.
0.0
filters
dict
Filter values to include or exclude objects. See the top of the script.
{}
masks_dir
str
Path to images masks, to exclude objects found near the edges. The masks must have the same name as the corresponding image to be segmented, without its suffix. Default is \"\", which disables this feature.
''
masks_ext
str
Masks files extension, without the leading \".\". Default is \"\".
''
Source code in scripts/segmentation/segment_images.py
def process_directory(\n    images_dir: str,\n    img_suffix: str = \"\",\n    segtype: str = \"\",\n    original_pixelsize: float = 1.0,\n    target_channel: int = 0,\n    proba_threshold: float = 0.0,\n    qupath_class: str = \"Object\",\n    qupath_color: list = [0, 0, 0],\n    channel_suffix: str = \"\",\n    edge_dist: float = 0.0,\n    filters: dict = {},\n    masks_dir: str = \"\",\n    masks_ext: str = \"\",\n):\n    \"\"\"\n    Main function, processes the .ome.tiff files in the input directory.\n\n    Parameters\n    ----------\n    images_dir : str\n        Path to the images to be segmented.\n    img_suffix : str\n        Images suffix, including extension.\n    segtype : str\n        Segmentation type.\n    original_pixelsize : float\n        Original images pixel size in microns.\n    target_channel : int\n        Index of the channel containing the objects of interest (eg. not the\n        background), in the probability map (*not* the original image channels).\n    proba_threshold : float < 1\n        Probability below this value will be discarded (multiplied by `MAX_PIX_VALUE`)\n    qupath_class : str\n        Name of the QuPath classification.\n    qupath_color : list of three elements\n        Color associated to that classification in RGB.\n    channel_suffix : str\n        Channel name, will be used as a suffix in output geojson files.\n    edge_dist : float\n        Distance to the edge of the brain masks that will be ignored, in microns. Set to\n        0 to disable this feature.\n    filters : dict\n        Filter values to include or exclude objects. See the top of the script.\n    masks_dir : str, optional\n        Path to images masks, to exclude objects found near the edges. The masks must\n        have the same name as the corresponding image to be segmented, without its\n        suffix. Default is \"\", which disables this feature.\n    masks_ext : str, optional\n        Masks files extension, without the leading \".\". Default is \"\".\n\n    \"\"\"\n\n    # -- Preparation\n    # get segmentation type\n    seg_method = get_seg_method(segtype)\n\n    # get output directory path\n    geojson_dir = get_geojson_dir(images_dir)\n\n    # get images list\n    images_list = [\n        os.path.join(images_dir, filename)\n        for filename in os.listdir(images_dir)\n        if filename.endswith(img_suffix)\n    ]\n\n    # write parameters\n    parameters = parameters_as_dict(\n        images_dir, masks_dir, segtype, channel_suffix, proba_threshold, edge_dist\n    )\n    param_file = os.path.join(geojson_dir, \"parameters\" + channel_suffix + \".txt\")\n    if os.path.isfile(param_file):\n        raise FileExistsError(\"Parameters file already exists.\")\n    else:\n        write_parameters(param_file, parameters, filters, original_pixelsize)\n\n    # convert parameters to pixels in probability map\n    pixelsize = hq.seg.get_pixelsize(images_list[0])  # get pixel size\n    edge_dist = int(edge_dist / pixelsize)\n    filters = hq.seg.convert_to_pixels(filters, pixelsize)\n\n    # get rescaling factor\n    rescale_factor = pixelsize / original_pixelsize\n\n    # get GeoJSON properties\n    geojson_props = get_geojson_properties(\n        qupath_class, qupath_color, objtype=QUPATH_TYPE\n    )\n\n    # -- Processing\n    pbar = tqdm(images_list)\n    for imgpath in pbar:\n        # build file names\n        imgname = os.path.basename(imgpath)\n        geoname = imgname.replace(img_suffix, \"\")\n        geojson_file = os.path.join(\n            geojson_dir, geoname + \"_segmentation\" + channel_suffix + \".geojson\"\n        )\n\n        # checks if output file already exists\n        if os.path.isfile(geojson_file):\n            continue\n\n        # read images\n        pbar.set_description(f\"{geoname}: Loading...\")\n        img = tifffile.imread(imgpath, key=target_channel)\n        if (edge_dist > 0) & (len(masks_dir) != 0):\n            mask = tifffile.imread(os.path.join(masks_dir, geoname + \".\" + masks_ext))\n            mask = hq.seg.pad_image(mask, img.shape)  # resize mask\n            # apply mask, eroding from the edges\n            img = img * hq.seg.erode_mask(mask, edge_dist)\n\n        # image processing\n        pbar.set_description(f\"{geoname}: IP...\")\n\n        # threshold probability and binarization\n        img = img >= proba_threshold * MAX_PIX_VALUE\n\n        # segmentation\n        pbar.set_description(f\"{geoname}: Segmenting...\")\n\n        if seg_method == \"lines\":\n            collection = hq.seg.segment_lines(\n                img,\n                geojson_props,\n                minsize=filters[\"length_low\"],\n                rescale_factor=rescale_factor,\n            )\n\n        elif seg_method == \"polygons\":\n            collection = hq.seg.segment_polygons(\n                img,\n                geojson_props,\n                area_min=filters[\"area_low\"],\n                area_max=filters[\"area_high\"],\n                ecc_min=filters[\"ecc_low\"],\n                ecc_max=filters[\"ecc_high\"],\n                rescale_factor=rescale_factor,\n            )\n\n        elif seg_method == \"points\":\n            collection = hq.seg.segment_points(\n                img,\n                geojson_props,\n                area_min=filters[\"area_low\"],\n                area_max=filters[\"area_high\"],\n                ecc_min=filters[\"ecc_low\"],\n                ecc_max=filters[\"ecc_high\"],\n                dist_thresh=filters[\"dist_thresh\"],\n                rescale_factor=rescale_factor,\n            )\n        else:\n            # we already printed an error message\n            return\n\n        # save geojson\n        pbar.set_description(f\"{geoname}: Saving...\")\n        with open(geojson_file, \"w\") as fid:\n            fid.write(geojson.dumps(collection))\n
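A sketch of how the module-level constants can be wired into this function, one call per channel of interest (the loop itself is an assumption about the script's main section, based on the parameters documented above):

for param in CHANNELS_PARAMS:
    process_directory(
        IMAGES_DIR,
        img_suffix=IMG_SUFFIX,
        segtype=SEGTYPE,
        original_pixelsize=ORIGINAL_PIXELSIZE,
        target_channel=param["target_channel"],
        proba_threshold=param["proba_threshold"],
        qupath_class=param["qp_class"],
        qupath_color=param["qp_color"],
        channel_suffix="_" + param["name"],  # assumed suffix convention
        edge_dist=EDGE_DIST,
        filters=FILTERS.copy(),  # copy: convert_to_pixels mutates the dict
        masks_dir=MASKS_DIR,
        masks_ext=MASKS_EXT,
    )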
"},{"location":"api-script-segment.html#scripts.segmentation.segment_images.write_parameters","title":"write_parameters(outfile, parameters, filters, original_pixelsize)
","text":"Write parameters to outfile
.
A timestamp will be added. Parameters are written as key = value, and a [filters] section header is added before the filter parameters.
Parameters:
Name Type Description Defaultoutfile
str
Full path to the output file.
requiredparameters
dict
General parameters.
requiredfilters
dict
Filters parameters.
requiredoriginal_pixelsize
float
Size of pixels in original image.
required Source code inscripts/segmentation/segment_images.py
def write_parameters(\n outfile: str, parameters: dict, filters: dict, original_pixelsize: float\n):\n \"\"\"\n Write parameters to `outfile`.\n\n A timestamp will be added. Parameters are written as key = value,\n and a [filters] is added before filters parameters.\n\n Parameters\n ----------\n outfile : str\n Full path to the output file.\n parameters : dict\n General parameters.\n filters : dict\n Filters parameters.\n original_pixelsize : float\n Size of pixels in original image.\n\n \"\"\"\n\n with open(outfile, \"w\") as fid:\n fid.writelines(f\"date = {datetime.now().strftime('%d-%B-%Y %H:%M:%S')}\\n\")\n\n fid.writelines(f\"original_pixelsize = {original_pixelsize}\\n\")\n\n for key, value in parameters.items():\n fid.writelines(f\"{key} = {value}\\n\")\n\n fid.writelines(\"[filters]\\n\")\n\n for key, value in filters.items():\n fid.writelines(f\"{key} = {value}\\n\")\n
"},{"location":"api-seg.html","title":"cuisto.seg","text":"seg module, part of cuisto.
Functions for segmenting probability maps stored as images.
"},{"location":"api-seg.html#cuisto.seg.convert_to_pixels","title":"convert_to_pixels(filters, pixelsize)
","text":"Convert some values in filters
to pixels.
Parameters:
Name Type Description Defaultfilters
dict
Must contain the keys used below.
requiredpixelsize
float
Pixel size in microns.
requiredReturns:
Name Type Descriptionfilters
dict
Same as input, with values in pixels.
Source code incuisto/seg.py
def convert_to_pixels(filters, pixelsize):\n \"\"\"\n Convert some values in `filters` in pixels.\n\n Parameters\n ----------\n filters : dict\n Must contain the keys used below.\n pixelsize : float\n Pixel size in microns.\n\n Returns\n -------\n filters : dict\n Same as input, with values in pixels.\n\n \"\"\"\n\n filters[\"area_low\"] = filters[\"area_low\"] / pixelsize**2\n filters[\"area_high\"] = filters[\"area_high\"] / pixelsize**2\n filters[\"length_low\"] = filters[\"length_low\"] / pixelsize\n filters[\"dist_thresh\"] = int(filters[\"dist_thresh\"] / pixelsize)\n\n return filters\n
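A worked example with a 0.5 µm pixel size: areas are divided by pixelsize², lengths and distances by pixelsize:

filters = {"length_low": 1.5, "area_low": 10, "area_high": 1000,
           "ecc_low": 0.0, "ecc_high": 0.9, "dist_thresh": 30}
filters = convert_to_pixels(filters, 0.5)
# area_low: 10 / 0.5**2 = 40.0, area_high: 4000.0 (px^2)
# length_low: 1.5 / 0.5 = 3.0 (px), dist_thresh: int(30 / 0.5) = 60 (px)
# eccentricities are dimensionless and left untouched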
"},{"location":"api-seg.html#cuisto.seg.erode_mask","title":"erode_mask(mask, edge_dist)
","text":"Erode the mask outline so that is is edge_dist
smaller from the border.
This allows discarding the edges.
Parameters:
Name Type Description Defaultmask
ndarray
required edge_dist
float
Distance to edges, in pixels.
requiredReturns:
Name Type Descriptioneroded_mask
ndarray of bool
Source code in cuisto/seg.py
def erode_mask(mask: np.ndarray, edge_dist: float) -> np.ndarray:\n    \"\"\"\n    Erode the mask outline so that it is `edge_dist` smaller from the border.\n\n    This allows discarding the edges.\n\n    Parameters\n    ----------\n    mask : ndarray\n    edge_dist : float\n        Distance to edges, in pixels.\n\n    Returns\n    -------\n    eroded_mask : ndarray of bool\n\n    \"\"\"\n\n    if edge_dist % 2 == 0:\n        edge_dist += 1  # make it odd, required by the footprint decomposition\n\n    footprint = morphology.square(edge_dist, decomposition=\"sequence\")\n\n    return mask * morphology.binary_erosion(mask, footprint=footprint)\n
"},{"location":"api-seg.html#cuisto.seg.get_collection_from_points","title":"get_collection_from_points(coords, properties, rescale_factor=1.0, offset=0.5)
","text":"Gather coordinates from coords
and put them in GeoJSON format.
An entry in coords
is a pair of (x, y) coordinates defining the point. properties
is a dictionary with the QuPath properties of each detection.
Parameters:
Name Type Description Defaultcoords
list
required properties
dict
required rescale_factor
float
Rescale output coordinates by this factor.
1.0
offset
float
Shift coordinates by this amount, typically to get pixel centers or edges. Default is 0.5.
0.5
Returns:
Name Type Descriptioncollection
FeatureCollection
Source code in cuisto/seg.py
def get_collection_from_points(\n coords: list, properties: dict, rescale_factor: float = 1.0, offset: float = 0.5\n) -> geojson.FeatureCollection:\n \"\"\"\n Gather coordinates from `coords` and put them in GeoJSON format.\n\n An entry in `coords` are pairs of (x, y) coordinates defining the point.\n `properties` is a dictionnary with QuPath properties of each detections.\n\n Parameters\n ----------\n coords : list\n properties : dict\n rescale_factor : float\n Rescale output coordinates by this factor.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n\n \"\"\"\n\n collection = [\n geojson.Feature(\n geometry=shapely.Point(\n np.flip((coord + offset) * rescale_factor)\n ), # shape object\n properties=properties, # object properties\n id=str(uuid.uuid4()), # object uuid\n )\n for coord in coords\n ]\n\n return geojson.FeatureCollection(collection)\n
"},{"location":"api-seg.html#cuisto.seg.get_collection_from_poly","title":"get_collection_from_poly(contours, properties, rescale_factor=1.0, offset=0.5)
","text":"Gather coordinates in the list and put them in GeoJSON format as Polygons.
An entry in contours
must define a closed polygon. properties
is a dictionary with the QuPath properties of each detection.
Parameters:
Name Type Description Defaultcontours
list
required properties
dict
QuPath objects' properties.
requiredrescale_factor
float
Rescale output coordinates by this factor.
1.0
offset
float
Shift coordinates by this amount, typically to get pixel centers or edges. Default is 0.5.
0.5
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def get_collection_from_poly(\n contours: list, properties: dict, rescale_factor: float = 1.0, offset: float = 0.5\n) -> geojson.FeatureCollection:\n \"\"\"\n Gather coordinates in the list and put them in GeoJSON format as Polygons.\n\n An entry in `contours` must define a closed polygon. `properties` is a dictionnary\n with QuPath properties of each detections.\n\n Parameters\n ----------\n contours : list\n properties : dict\n QuPatj objects' properties.\n rescale_factor : float\n Rescale output coordinates by this factor.\n offset : float\n Shift coordinates by this amount, typically to get pixel centers or edges.\n Default is 0.5.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n collection = [\n geojson.Feature(\n geometry=shapely.Polygon(\n np.fliplr((contour + offset) * rescale_factor)\n ), # shape object\n properties=properties, # object properties\n id=str(uuid.uuid4()), # object uuid\n )\n for contour in contours\n ]\n\n return geojson.FeatureCollection(collection)\n
"},{"location":"api-seg.html#cuisto.seg.get_collection_from_skel","title":"get_collection_from_skel(skeleton, properties, rescale_factor=1.0, offset=0.5)
","text":"Get the coordinates of each skeleton path as a GeoJSON Features in a FeatureCollection. properties
is a dictionnary with QuPath properties of each detections.
Parameters:
Name Type Description Defaultskeleton
Skeleton
required properties
dict
QuPath objects' properties.
requiredrescale_factor
float
Rescale output coordinates by this factor.
1.0
offset
float
Shift coordinates by this amount, typically to get pixel centers or edges. Default is 0.5.
0.5
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def get_collection_from_skel(\n skeleton: Skeleton, properties: dict, rescale_factor: float = 1.0, offset=0.5\n) -> geojson.FeatureCollection:\n \"\"\"\n Get the coordinates of each skeleton path as a GeoJSON Features in a\n FeatureCollection.\n `properties` is a dictionnary with QuPath properties of each detections.\n\n Parameters\n ----------\n skeleton : skan.Skeleton\n properties : dict\n QuPatj objects' properties.\n rescale_factor : float\n Rescale output coordinates by this factor.\n offset : float\n Shift coordinates by this amount, typically to get pixel centers or edges.\n Default is 0.5.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n\n branch_data = summarize(skeleton, separator=\"_\")\n\n collection = []\n for ind in range(skeleton.n_paths):\n prop = properties.copy()\n prop[\"measurements\"] = {\"skeleton_id\": int(branch_data.loc[ind, \"skeleton_id\"])}\n collection.append(\n geojson.Feature(\n geometry=shapely.LineString(\n (skeleton.path_coordinates(ind)[:, ::-1] + offset) * rescale_factor\n ), # shape object\n properties=prop, # object properties\n id=str(uuid.uuid4()), # object uuid\n )\n )\n\n return geojson.FeatureCollection(collection)\n
"},{"location":"api-seg.html#cuisto.seg.get_image_skeleton","title":"get_image_skeleton(img, minsize=0)
","text":"Get the image skeleton.
Computes the image skeleton and removes objects smaller than minsize
.
Parameters:
Name Type Description Defaultimg
ndarray of bool
required minsize
number
Min. size the object can have, as a number of pixels. Default is 0.
0
Returns:
Name Type Descriptionskel
ndarray of bool
Binary image with 1-pixel wide skeleton.
Source code incuisto/seg.py
def get_image_skeleton(img: np.ndarray, minsize=0) -> np.ndarray:\n \"\"\"\n Get the image skeleton.\n\n Computes the image skeleton and removes objects smaller than `minsize`.\n\n Parameters\n ----------\n img : ndarray of bool\n minsize : number, optional\n Min. size the object can have, as a number of pixels. Default is 0.\n\n Returns\n -------\n skel : ndarray of bool\n Binary image with 1-pixel wide skeleton.\n\n \"\"\"\n\n skel = morphology.skeletonize(img)\n\n return morphology.remove_small_objects(skel, min_size=minsize, connectivity=2)\n
"},{"location":"api-seg.html#cuisto.seg.get_pixelsize","title":"get_pixelsize(image_name)
","text":"Get pixel size recorded in image_name
TIFF metadata.
Parameters:
Name Type Description Defaultimage_name
str
Full path to image.
requiredReturns:
Name Type Descriptionpixelsize
float
Pixel size in microns.
Source code incuisto/seg.py
def get_pixelsize(image_name: str) -> float:\n \"\"\"\n Get pixel size recorded in `image_name` TIFF metadata.\n\n Parameters\n ----------\n image_name : str\n Full path to image.\n\n Returns\n -------\n pixelsize : float\n Pixel size in microns.\n\n \"\"\"\n\n with tifffile.TiffFile(image_name) as tif:\n # XResolution is a tuple, numerator, denomitor. The inverse is the pixel size\n return (\n tif.pages[0].tags[\"XResolution\"].value[1]\n / tif.pages[0].tags[\"XResolution\"].value[0]\n )\n
"},{"location":"api-seg.html#cuisto.seg.pad_image","title":"pad_image(img, finalsize)
","text":"Pad image with zeroes to match expected final size.
Parameters:
Name Type Description Defaultimg
ndarray
required finalsize
tuple or list
nrows, ncolumns
requiredReturns:
Name Type Descriptionimgpad
ndarray
img with black borders.
Source code incuisto/seg.py
def pad_image(img: np.ndarray, finalsize: tuple | list) -> np.ndarray:\n \"\"\"\n Pad image with zeroes to match expected final size.\n\n Parameters\n ----------\n img : ndarray\n finalsize : tuple or list\n nrows, ncolumns\n\n Returns\n -------\n imgpad : ndarray\n img with black borders.\n\n \"\"\"\n\n final_h = finalsize[0] # requested number of rows (height)\n final_w = finalsize[1] # requested number of columns (width)\n original_h = img.shape[0] # input number of rows\n original_w = img.shape[1] # input number of columns\n\n a = (final_h - original_h) // 2 # vertical padding before\n aa = final_h - a - original_h # vertical padding after\n b = (final_w - original_w) // 2 # horizontal padding before\n bb = final_w - b - original_w # horizontal padding after\n\n return np.pad(img, pad_width=((a, aa), (b, bb)), mode=\"constant\")\n
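For example, padding a 100×80 image to 120×100 adds a 10-pixel black border on each side:

import numpy as np

img = np.ones((100, 80), dtype=bool)
padded = pad_image(img, (120, 100))
# vertical: a = (120 - 100) // 2 = 10, aa = 120 - 10 - 100 = 10
# horizontal: b = (100 - 80) // 2 = 10, bb = 10
print(padded.shape)  # (120, 100)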
"},{"location":"api-seg.html#cuisto.seg.segment_lines","title":"segment_lines(img, geojson_props, minsize=0.0, rescale_factor=1.0)
","text":"Wraps skeleton analysis to get paths coordinates.
Parameters:
Name Type Description Defaultimg
ndarray of bool
Binary image to segment as lines.
requiredgeojson_props
dict
GeoJSON properties of objects.
requiredminsize
float
Minimum size in pixels for an object.
0.0
rescale_factor
float
Rescale output coordinates by this factor.
1.0
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def segment_lines(\n img: np.ndarray, geojson_props: dict, minsize=0.0, rescale_factor=1.0\n) -> geojson.FeatureCollection:\n \"\"\"\n Wraps skeleton analysis to get paths coordinates.\n\n Parameters\n ----------\n img : ndarray of bool\n Binary image to segment as lines.\n geojson_props : dict\n GeoJSON properties of objects.\n minsize : float\n Minimum size in pixels for an object.\n rescale_factor : float\n Rescale output coordinates by this factor.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n\n skel = get_image_skeleton(img, minsize=minsize)\n\n # get paths coordinates as FeatureCollection\n skeleton = Skeleton(skel, keep_images=False)\n return get_collection_from_skel(\n skeleton, geojson_props, rescale_factor=rescale_factor\n )\n
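Putting the pieces together on a single probability map, mirroring what the segmentation script does (file names and threshold are hypothetical):

import geojson
import tifffile

img = tifffile.imread("slice_Probabilities.tiff", key=0)  # hypothetical probability map
img = img >= 0.85 * 255  # binarize: probability threshold scaled by the max pixel value
props = {"objectType": "detection",
         "classification": {"name": "Fibers: EGFP", "color": [135, 11, 191]},
         "isLocked": "true"}
collection = segment_lines(img, props, minsize=3, rescale_factor=1.0)
with open("slice_segmentation_egfp.geojson", "w") as fid:
    fid.write(geojson.dumps(collection))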
"},{"location":"api-seg.html#cuisto.seg.segment_points","title":"segment_points(img, geojson_props, area_min=0.0, area_max=np.inf, ecc_min=0, ecc_max=1, dist_thresh=0, rescale_factor=1)
","text":"Point segmentation.
First, segment polygons to apply shape filters, then extract their centroids, and remove isolated points as defined by dist_thresh
.
Parameters:
Name Type Description Defaultimg
ndarray of bool
Binary image to segment as points.
requiredgeojson_props
dict
GeoJSON properties of objects.
requiredarea_min
float
Minimum and maximum area in pixels for an object.
0.0
area_max
float
Minimum and maximum area in pixels for an object.
np.inf
ecc_min
float
Minimum and maximum eccentricity for an object.
0
ecc_max
float
Minimum and maximum eccentricity for an object.
1
dist_thresh
float
Maximal distance in pixels between objects before considering them isolated and removing them. 0 disables it.
0
rescale_factor
float
Rescale output coordinates by this factor.
1
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def segment_points(\n img: np.ndarray,\n geojson_props: dict,\n area_min: float = 0.0,\n area_max: float = np.inf,\n ecc_min: float = 0,\n ecc_max: float = 1,\n dist_thresh: float = 0,\n rescale_factor: float = 1,\n) -> geojson.FeatureCollection:\n \"\"\"\n Point segmentation.\n\n First, segment polygons to apply shape filters, then extract their centroids,\n and remove isolated points as defined by `dist_thresh`.\n\n Parameters\n ----------\n img : ndarray of bool\n Binary image to segment as points.\n geojson_props : dict\n GeoJSON properties of objects.\n area_min, area_max : float\n Minimum and maximum area in pixels for an object.\n ecc_min, ecc_max : float\n Minimum and maximum eccentricity for an object.\n dist_thresh : float\n Maximal distance in pixels between objects before considering them as isolated and remove them.\n 0 disables it.\n rescale_factor : float\n Rescale output coordinates by this factor.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n\n # get objects properties\n stats = pd.DataFrame(\n measure.regionprops_table(\n measure.label(img), properties=(\"label\", \"area\", \"eccentricity\", \"centroid\")\n )\n )\n\n # keep objects matching filters\n stats = stats[\n (stats[\"area\"] >= area_min)\n & (stats[\"area\"] <= area_max)\n & (stats[\"eccentricity\"] >= ecc_min)\n & (stats[\"eccentricity\"] <= ecc_max)\n ]\n\n # create an image from centroids only\n stats[\"centroid-0\"] = stats[\"centroid-0\"].astype(int)\n stats[\"centroid-1\"] = stats[\"centroid-1\"].astype(int)\n bw = np.zeros(img.shape, dtype=bool)\n bw[stats[\"centroid-0\"], stats[\"centroid-1\"]] = True\n\n # filter isolated objects\n if dist_thresh:\n # dilation of points\n if dist_thresh % 2 == 0:\n dist_thresh += 1 # decomposition requires even number\n\n footprint = morphology.square(int(dist_thresh), decomposition=\"sequence\")\n dilated = measure.label(morphology.binary_dilation(bw, footprint=footprint))\n stats = pd.DataFrame(\n measure.regionprops_table(dilated, properties=(\"label\", \"area\"))\n )\n\n # objects that did not merge are alone\n toremove = stats[(stats[\"area\"] <= dist_thresh**2)]\n dilated[np.isin(dilated, toremove[\"label\"])] = 0 # remove them\n\n # apply mask\n bw = bw * dilated\n\n # get points coordinates\n coords = np.argwhere(bw)\n\n return get_collection_from_points(\n coords, geojson_props, rescale_factor=rescale_factor\n )\n
"},{"location":"api-seg.html#cuisto.seg.segment_polygons","title":"segment_polygons(img, geojson_props, area_min=0.0, area_max=np.inf, ecc_min=0.0, ecc_max=1.0, rescale_factor=1.0)
","text":"Polygon segmentation.
Parameters:
Name Type Description Defaultimg
ndarray of bool
Binary image to segment as polygons.
requiredgeojson_props
dict
GeoJSON properties of objects.
requiredarea_min
float
Minimum and maximum area in pixels for an object.
0.0
area_max
float
Minimum and maximum area in pixels for an object.
np.inf
ecc_min
float
Minimum and maximum eccentricity for an object.
0.0
ecc_max
float
Minimum and maximum eccentricity for an object.
1.0
rescale_factor
float
Rescale output coordinates by this factor.
1.0
Returns:
Name Type Descriptioncollection
FeatureCollection
A FeatureCollection ready to be written as geojson.
Source code incuisto/seg.py
def segment_polygons(\n img: np.ndarray,\n geojson_props: dict,\n area_min: float = 0.0,\n area_max: float = np.inf,\n ecc_min: float = 0.0,\n ecc_max: float = 1.0,\n rescale_factor: float = 1.0,\n) -> geojson.FeatureCollection:\n \"\"\"\n Polygon segmentation.\n\n Parameters\n ----------\n img : ndarray of bool\n Binary image to segment as polygons.\n geojson_props : dict\n GeoJSON properties of objects.\n area_min, area_max : float\n Minimum and maximum area in pixels for an object.\n ecc_min, ecc_max : float\n Minimum and maximum eccentricity for an object.\n rescale_factor: float\n Rescale output coordinates by this factor.\n\n Returns\n -------\n collection : geojson.FeatureCollection\n A FeatureCollection ready to be written as geojson.\n\n \"\"\"\n\n label_image = measure.label(img)\n\n # get objects properties\n stats = pd.DataFrame(\n measure.regionprops_table(\n label_image, properties=(\"label\", \"area\", \"eccentricity\")\n )\n )\n\n # remove objects not matching filters\n toremove = stats[\n (stats[\"area\"] < area_min)\n | (stats[\"area\"] > area_max)\n | (stats[\"eccentricity\"] < ecc_min)\n | (stats[\"eccentricity\"] > ecc_max)\n ]\n\n label_image[np.isin(label_image, toremove[\"label\"])] = 0\n\n # find objects countours\n label_image = label_image > 0\n contours = measure.find_contours(label_image)\n\n return get_collection_from_poly(\n contours, geojson_props, rescale_factor=rescale_factor\n )\n
"},{"location":"api-utils.html","title":"cuisto.utils","text":"utils module, part of cuisto.
Contains utility functions.
"},{"location":"api-utils.html#cuisto.utils.add_brain_region","title":"add_brain_region(df, atlas, col='Parent')
","text":"Add brain region to a DataFrame with Atlas_X
, Atlas_Y
and Atlas_Z
columns.
This uses the Brainglobe Atlas API to query the atlas. It does not use the structure_from_coords() method; instead it manually converts the coordinates to stack indices, gets the corresponding annotation IDs and queries the corresponding acronyms -- because brainglobe-atlasapi is not vectorized at all.
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame with atlas coordinates in microns.
requiredatlas
BrainGlobeAtlas
required col
str
Column in which to put the regions acronyms. Default is \"Parent\".
'Parent'
Returns:
Name Type Descriptiondf
DataFrame
Same DataFrame with a new \"Parent\" column.
Source code incuisto/utils.py
def add_brain_region(\n df: pd.DataFrame, atlas: BrainGlobeAtlas, col=\"Parent\"\n) -> pd.DataFrame:\n \"\"\"\n Add brain region to a DataFrame with `Atlas_X`, `Atlas_Y` and `Atlas_Z` columns.\n\n This uses Brainglobe Atlas API to query the atlas. It does not use the\n structure_from_coords() method, instead it manually converts the coordinates in\n stack indices, then get the corresponding annotation id and query the corresponding\n acronym -- because brainglobe-atlasapi is not vectorized at all.\n\n Parameters\n ----------\n df : pd.DataFrame\n DataFrame with atlas coordinates in microns.\n atlas : BrainGlobeAtlas\n col : str, optional\n Column in which to put the regions acronyms. Default is \"Parent\".\n\n Returns\n -------\n df : pd.DataFrame\n Same DataFrame with a new \"Parent\" column.\n\n \"\"\"\n df_in = df.copy()\n\n res = atlas.resolution # microns <-> pixels conversion\n lims = atlas.shape_um # out of brain\n\n # set out-of-brain objects at 0 so we get \"root\" as their parent\n df_in.loc[(df_in[\"Atlas_X\"] >= lims[0]) | (df_in[\"Atlas_X\"] < 0), \"Atlas_X\"] = 0\n df_in.loc[(df_in[\"Atlas_Y\"] >= lims[1]) | (df_in[\"Atlas_Y\"] < 0), \"Atlas_Y\"] = 0\n df_in.loc[(df_in[\"Atlas_Z\"] >= lims[2]) | (df_in[\"Atlas_Z\"] < 0), \"Atlas_Z\"] = 0\n\n # build the multi index, in pixels and integers\n ixyz = (\n df_in[\"Atlas_X\"].divide(res[0]).astype(int),\n df_in[\"Atlas_Y\"].divide(res[1]).astype(int),\n df_in[\"Atlas_Z\"].divide(res[2]).astype(int),\n )\n # convert i, j, k indices in raveled indices\n linear_indices = np.ravel_multi_index(ixyz, dims=atlas.annotation.shape)\n # get the structure id from the annotation stack\n idlist = atlas.annotation.ravel()[linear_indices]\n # replace 0 which does not exist to 997 (root)\n idlist[idlist == 0] = 997\n\n # query the corresponding acronyms\n lookup = atlas.lookup_df.set_index(\"id\")\n df.loc[:, col] = lookup.loc[idlist, \"acronym\"].values\n\n return df\n
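The vectorized lookup at the heart of this function can be illustrated on a toy annotation stack (toy values, not a real atlas):

import numpy as np

annotation = np.arange(24).reshape(2, 3, 4)  # toy annotation stack
ix, iy, iz = np.array([0, 1]), np.array([2, 0]), np.array([3, 1])
linear_indices = np.ravel_multi_index((ix, iy, iz), dims=annotation.shape)
ids = annotation.ravel()[linear_indices]
# same as [annotation[0, 2, 3], annotation[1, 0, 1]] -> [11, 13],
# but as a single vectorized operation for any number of points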
"},{"location":"api-utils.html#cuisto.utils.add_channel","title":"add_channel(df, object_type, channel_names)
","text":"Add channel as a measurement for detections DataFrame.
The channel is read from the Classification column, which must be formatted as \"object_type: channel\".
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame with detections measurements.
requiredobject_type
str
Object type (primary classification).
requiredchannel_names
dict
Map between original channel names to something else.
requiredReturns:
Type DescriptionDataFrame
Same DataFrame with a \"channel\" column.
Source code incuisto/utils.py
def add_channel(\n df: pd.DataFrame, object_type: str, channel_names: dict\n) -> pd.DataFrame:\n \"\"\"\n Add channel as a measurement for detections DataFrame.\n\n The channel is read from the Classification column, the latter having to be\n formatted as \"object_type: channel\".\n\n Parameters\n ----------\n df : pd.DataFrame\n DataFrame with detections measurements.\n object_type : str\n Object type (primary classification).\n channel_names : dict\n Map between original channel names to something else.\n\n Returns\n -------\n pd.DataFrame\n Same DataFrame with a \"channel\" column.\n\n \"\"\"\n # check if there is something to do\n if \"channel\" in df.columns:\n return df\n\n kind = get_df_kind(df)\n if kind == \"annotation\":\n warnings.warn(\"Annotation DataFrame not supported.\")\n return df\n\n # add channel, from {class_name: channel} classification\n df[\"channel\"] = (\n df[\"Classification\"].str.replace(object_type + \": \", \"\").map(channel_names)\n )\n\n return df\n
"},{"location":"api-utils.html#cuisto.utils.add_hemisphere","title":"add_hemisphere(df, hemisphere_names, midline=5700, col='Atlas_Z', atlas_type='brain')
","text":"Add hemisphere (left/right) as a measurement for detections or annotations.
The hemisphere is read in the \"Classification\" column for annotations. The latter needs to be in the form \"Right: Name\" or \"Left: Name\". For detections, the input col
of df
is compared to midline
to assess whether the object belongs to the left or right hemisphere.
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame with detections or annotations measurements.
requiredhemisphere_names
dict
Map between \"Left\" and \"Right\" to something else.
requiredmidline
float
Used only for \"detections\" df
. Corresponds to the brain midline in microns, should be 5700 for CCFv3 and 1610 for spinal cord.
5700
col
str
Name of the column containing the Z coordinate (medio-lateral) in microns. Default is \"Atlas_Z\".
'Atlas_Z'
atlas_type
(brain, cord)
Type of atlas used for registration. Required because the brain atlas is swapped between left and right while the spinal cord atlas is not. Default is \"brain\".
\"brain\"
Returns:
Name Type Descriptiondf
DataFrame
The same DataFrame with a new \"hemisphere\" column
Source code incuisto/utils.py
def add_hemisphere(\n df: pd.DataFrame,\n hemisphere_names: dict,\n midline: float = 5700,\n col: str = \"Atlas_Z\",\n atlas_type: str = \"brain\",\n) -> pd.DataFrame:\n \"\"\"\n Add hemisphere (left/right) as a measurement for detections or annotations.\n\n The hemisphere is read in the \"Classification\" column for annotations. The latter\n needs to be in the form \"Right: Name\" or \"Left: Name\". For detections, the input\n `col` of `df` is compared to `midline` to assess if the object belong to the left or\n right hemispheres.\n\n Parameters\n ----------\n df : pandas.DataFrame\n DataFrame with detections or annotations measurements.\n hemisphere_names : dict\n Map between \"Left\" and \"Right\" to something else.\n midline : float\n Used only for \"detections\" `df`. Corresponds to the brain midline in microns,\n should be 5700 for CCFv3 and 1610 for spinal cord.\n col : str, optional\n Name of the column containing the Z coordinate (medio-lateral) in microns.\n Default is \"Atlas_Z\".\n atlas_type : {\"brain\", \"cord\"}, optional\n Type of atlas used for registration. Required because the brain atlas is swapped\n between left and right while the spinal cord atlas is not. Default is \"brain\".\n\n Returns\n -------\n df : pandas.DataFrame\n The same DataFrame with a new \"hemisphere\" column\n\n \"\"\"\n # check if there is something to do\n if \"hemisphere\" in df.columns:\n return df\n\n # get kind of DataFrame\n kind = get_df_kind(df)\n\n if kind == \"detection\":\n # use midline\n if atlas_type == \"brain\":\n # brain atlas : beyond midline, it's left\n df.loc[df[col] >= midline, \"hemisphere\"] = hemisphere_names[\"Left\"]\n df.loc[df[col] < midline, \"hemisphere\"] = hemisphere_names[\"Right\"]\n elif atlas_type == \"cord\":\n # cord atlas : below midline, it's left\n df.loc[df[col] <= midline, \"hemisphere\"] = hemisphere_names[\"Left\"]\n df.loc[df[col] > midline, \"hemisphere\"] = hemisphere_names[\"Right\"]\n\n elif kind == \"annotation\":\n # use Classification name -- this does not depend on atlas type\n df[\"hemisphere\"] = [name.split(\":\")[0] for name in df[\"Classification\"]]\n df[\"hemisphere\"] = df[\"hemisphere\"].map(hemisphere_names)\n\n return df\n
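A toy detections example with the default brain midline (the mapping of Left/Right to Contra./Ipsi. is just an example choice):

import pandas as pd

df = pd.DataFrame({
    "Object type": ["Detection", "Detection"],
    "Atlas_Z": [3000.0, 8000.0],  # µm, medio-lateral
})
df = add_hemisphere(df, {"Left": "Contra.", "Right": "Ipsi."})
# brain atlas: Atlas_Z < 5700 -> "Ipsi." (right), Atlas_Z >= 5700 -> "Contra." (left)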
"},{"location":"api-utils.html#cuisto.utils.ccf_to_stereo","title":"ccf_to_stereo(x_ccf, y_ccf, z_ccf=0)
","text":"Convert X, Y, Z coordinates in CCFv3 to stereotaxis coordinates (as in Paxinos-Franklin atlas).
Coordinates are shifted, rotated and squeezed, see (1) for more info. Input must be in mm. x_ccf
corresponds to the antero-posterior (rostro-caudal) axis. y_ccf
corresponds to the dorso-ventral axis. z_ccf
corresponds to the medio-lateral (left-right) axis.
Warning : it is a rough estimation.
(1) https://community.brain-map.org/t/how-to-transform-ccf-x-y-z-coordinates-into-stereotactic-coordinates/1858
Parameters:
Name Type Description Defaultx_ccf
floats or ndarray
Coordinates in CCFv3 space in mm.
requiredy_ccf
floats or ndarray
Coordinates in CCFv3 space in mm.
requiredz_ccf
float or ndarray
Coordinate in CCFv3 space in mm. Default is 0.
0
Returns:
Type Descriptionap, dv, ml : floats or np.ndarray
Stereotaxic coordinates in mm.
Source code incuisto/utils.py
def ccf_to_stereo(\n x_ccf: float | np.ndarray, y_ccf: float | np.ndarray, z_ccf: float | np.ndarray = 0\n) -> tuple:\n \"\"\"\n Convert X, Y, Z coordinates in CCFv3 to stereotaxis coordinates (as in\n Paxinos-Franklin atlas).\n\n Coordinates are shifted, rotated and squeezed, see (1) for more info. Input must be\n in mm.\n `x_ccf` corresponds to the anterio-posterior (rostro-caudal) axis.\n `y_ccf` corresponds to the dorso-ventral axis.\n `z_ccf` corresponds to the medio-lateral axis (left-right) axis.\n\n Warning : it is a rough estimation.\n\n (1) https://community.brain-map.org/t/how-to-transform-ccf-x-y-z-coordinates-into-stereotactic-coordinates/1858\n\n Parameters\n ----------\n x_ccf, y_ccf : floats or np.ndarray\n Coordinates in CCFv3 space in mm.\n z_ccf : float or np.ndarray, optional\n Coordinate in CCFv3 space in mm. Default is 0.\n\n Returns\n -------\n ap, dv, ml : floats or np.ndarray\n Stereotaxic coordinates in mm.\n\n \"\"\"\n # Center CCF on Bregma\n xstereo = -(x_ccf - 5.40) # anterio-posterior coordinate (rostro-caudal)\n ystereo = y_ccf - 0.44 # dorso-ventral coordinate\n ml = z_ccf - 5.70 # medio-lateral coordinate (left-right)\n\n # Rotate CCF of 5\u00b0\n angle = np.deg2rad(5)\n ap = xstereo * np.cos(angle) - ystereo * np.sin(angle)\n dv = xstereo * np.sin(angle) + ystereo * np.cos(angle)\n\n # Squeeze the dorso-ventral axis by 94.34%\n dv *= 0.9434\n\n return ap, dv, ml\n
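As a sanity check on the formulas above, the CCFv3 point used as Bregma, (5.40, 0.44, 5.70) mm, maps to the stereotaxic origin:

ap, dv, ml = ccf_to_stereo(5.40, 0.44, 5.70)
# xstereo = -(5.40 - 5.40) = 0, ystereo = 0.44 - 0.44 = 0, ml = 5.70 - 5.70 = 0
# rotating and squeezing zeros leaves zeros, so (ap, dv, ml) == (0, 0, 0)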
"},{"location":"api-utils.html#cuisto.utils.filter_df_classifications","title":"filter_df_classifications(df, filter_list, mode='keep', col='Classification')
","text":"Filter a DataFrame whether specified col
column entries contain elements in filter_list
. Case insensitive.
If mode
is \"keep\", keep entries only if their col
is in the list (default). If mode
is \"remove\", remove entries if their col
is in the list.
Parameters:
Name Type Description Defaultdf
DataFrame
required filter_list
list | tuple | str
List of words that should be present to trigger the filter.
requiredmode
keep or remove
Keep or remove entries from the list. Default is \"keep\".
'keep'
col
str
Key in df
. Default is \"Classification\".
'Classification'
Returns:
Type DescriptionDataFrame
Filtered DataFrame.
Source code incuisto/utils.py
def filter_df_classifications(\n df: pd.DataFrame, filter_list: list | tuple | str, mode=\"keep\", col=\"Classification\"\n) -> pd.DataFrame:\n \"\"\"\n Filter a DataFrame whether specified `col` column entries contain elements in\n `filter_list`. Case insensitive.\n\n If `mode` is \"keep\", keep entries only if their `col` in is in the list (default).\n If `mode` is \"remove\", remove entries if their `col` is in the list.\n\n Parameters\n ----------\n df : pd.DataFrame\n filter_list : list | tuple | str\n List of words that should be present to trigger the filter.\n mode : \"keep\" or \"remove\", optional\n Keep or remove entries from the list. Default is \"keep\".\n col : str, optional\n Key in `df`. Default is \"Classification\".\n\n Returns\n -------\n pd.DataFrame\n Filtered DataFrame.\n\n \"\"\"\n # check input\n if isinstance(filter_list, str):\n filter_list = [filter_list] # make sure it is a list\n\n if col not in df.columns:\n # might be because of 'Classification' instead of 'classification'\n col = col.capitalize()\n if col not in df.columns:\n raise KeyError(f\"{col} not in DataFrame.\")\n\n pattern = \"|\".join(f\".*{s}.*\" for s in filter_list)\n\n if mode == \"keep\":\n df_return = df[df[col].str.contains(pattern, case=False, regex=True)]\n elif mode == \"remove\":\n df_return = df[~df[col].str.contains(pattern, case=False, regex=True)]\n\n # check\n if len(df_return) == 0:\n raise ValueError(\n (\n f\"Filtering '{col}' with {filter_list} resulted in an\"\n + \" empty DataFrame, check your config file.\"\n )\n )\n return df_return\n
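For instance, keeping only rows whose classification mentions one of two markers (toy values):

import pandas as pd

df = pd.DataFrame({"Classification": ["Cells: EGFP", "Cells: DsRed", "Fibers: Cy5"]})
kept = filter_df_classifications(df, ["egfp", "dsred"], mode="keep")
# case-insensitive substring match: keeps the first two rows only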
"},{"location":"api-utils.html#cuisto.utils.filter_df_regions","title":"filter_df_regions(df, filter_list, mode='keep', col='Parent')
","text":"Filters entries in df
based on whether their col
is in filter_list
or not.
If mode
is \"keep\", keep entries only if their col
is in the list (default). If mode
is \"remove\", remove entries if their col
is in the list.
Parameters:
Name Type Description Defaultdf
DataFrame
required filter_list
list-like
List of regions to keep or remove from the DataFrame.
requiredmode
keep or remove
Keep or remove entries from the list. Default is \"keep\".
'keep'
col
str
Key in df
. Default is \"Parent\".
'Parent'
Returns:
Name Type Descriptiondf
DataFrame
Filtered DataFrame.
Source code incuisto/utils.py
def filter_df_regions(\n df: pd.DataFrame, filter_list: list | tuple, mode=\"keep\", col=\"Parent\"\n) -> pd.DataFrame:\n \"\"\"\n Filters entries in `df` based on wether their `col` is in `filter_list` or not.\n\n If `mode` is \"keep\", keep entries only if their `col` in is in the list (default).\n If `mode` is \"remove\", remove entries if their `col` is in the list.\n\n Parameters\n ----------\n df : pandas.DataFrame\n filter_list : list-like\n List of regions to keep or remove from the DataFrame.\n mode : \"keep\" or \"remove\", optional\n Keep or remove entries from the list. Default is \"keep\".\n col : str, optional\n Key in `df`. Default is \"Parent\".\n\n Returns\n -------\n df : pandas.DataFrame\n Filtered DataFrame.\n\n \"\"\"\n\n if mode == \"keep\":\n return df[df[col].isin(filter_list)]\n if mode == \"remove\":\n return df[~df[col].isin(filter_list)]\n
"},{"location":"api-utils.html#cuisto.utils.get_blacklist","title":"get_blacklist(file, atlas)
","text":"Build a list of regions to exclude from file.
File must be a TOML with [WITH_CHILDS] and [EXACT] sections.
Parameters:
Name Type Description Defaultfile
str
Full path to the atlas_blacklist.toml file.
requiredatlas
BrainGlobeAtlas
Atlas to extract regions from.
requiredReturns:
Name Type Descriptionblack_list
list
Full list of acronyms to discard.
Source code incuisto/utils.py
def get_blacklist(file: str, atlas: BrainGlobeAtlas) -> list:\n \"\"\"\n Build a list of regions to exclude from file.\n\n File must be a TOML with [WITH_CHILDS] and [EXACT] sections.\n\n Parameters\n ----------\n file : str\n Full path the atlas_blacklist.toml file.\n atlas : BrainGlobeAtlas\n Atlas to extract regions from.\n\n Returns\n -------\n black_list : list\n Full list of acronyms to discard.\n\n \"\"\"\n with open(file, \"rb\") as fid:\n content = tomllib.load(fid)\n\n blacklist = [] # init. the list\n\n # add regions and their descendants\n for region in content[\"WITH_CHILDS\"][\"members\"]:\n blacklist.extend(\n [\n atlas.structures[id][\"acronym\"]\n for id in atlas.structures.tree.expand_tree(\n atlas.structures[region][\"id\"]\n )\n ]\n )\n\n # add regions specified exactly (no descendants)\n blacklist.extend(content[\"EXACT\"][\"members\"])\n\n return blacklist\n
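A sketch of what such a file could look like, parsed here from a string for illustration (the region acronyms are placeholders; the section and key names are those the function reads):

import tomllib

toml_text = """
[WITH_CHILDS]
members = ["fiber tracts", "VS"]  # blacklisted with all their descendants

[EXACT]
members = ["root"]                # blacklisted as-is, descendants kept
"""
content = tomllib.loads(toml_text)
print(content["WITH_CHILDS"]["members"])  # ['fiber tracts', 'VS']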
"},{"location":"api-utils.html#cuisto.utils.get_data_coverage","title":"get_data_coverage(df, col='Atlas_AP', by='animal')
","text":"Get min and max in col
for each by
.
Used to get data coverage for each animal to plot in distributions.
Parameters:
Name Type Description Defaultdf
DataFrame
description
requiredcol
str
Key in df
, default is \"Atlas_X\".
'Atlas_AP'
by
str
Key in df
, default is \"animal\".
'animal'
Returns:
Type DescriptionDataFrame
min and max of col
for each by
, named \"X_min\", and \"X_max\".
cuisto/utils.py
def get_data_coverage(df: pd.DataFrame, col=\"Atlas_AP\", by=\"animal\") -> pd.DataFrame:\n \"\"\"\n Get min and max in `col` for each `by`.\n\n Used to get data coverage for each animal to plot in distributions.\n\n Parameters\n ----------\n df : pd.DataFrame\n _description_\n col : str, optional\n Key in `df`, default is \"Atlas_X\".\n by : str, optional\n Key in `df` , default is \"animal\".\n\n Returns\n -------\n pd.DataFrame\n min and max of `col` for each `by`, named \"X_min\", and \"X_max\".\n\n \"\"\"\n df_group = df.groupby([by])\n return pd.DataFrame(\n [\n df_group[col].min(),\n df_group[col].max(),\n ],\n index=[\"X_min\", \"X_max\"],\n )\n
"},{"location":"api-utils.html#cuisto.utils.get_df_kind","title":"get_df_kind(df)
","text":"Get DataFrame kind, eg. Annotations or Detections.
It is based on reading the Object Type of the first entry, so the DataFrame must have only one kind of object.
Parameters:
Name Type Description Defaultdf
DataFrame
required Returns:
Name Type Descriptionkind
str
\"detection\" or \"annotation\".
Source code incuisto/utils.py
def get_df_kind(df: pd.DataFrame) -> str:\n \"\"\"\n Get DataFrame kind, eg. Annotations or Detections.\n\n It is based on reading the Object Type of the first entry, so the DataFrame must\n have only one kind of object.\n\n Parameters\n ----------\n df : pandas.DataFrame\n\n Returns\n -------\n kind : str\n \"detection\" or \"annotation\".\n\n \"\"\"\n return df[\"Object type\"].iloc[0].lower()\n
"},{"location":"api-utils.html#cuisto.utils.get_injection_site","title":"get_injection_site(animal, info_file, channel, stereo=False)
","text":"Get the injection site coordinates associated with animal.
Parameters:
Name Type Description Defaultanimal
str
Animal ID.
requiredinfo_file
str
Path to TOML info file.
requiredchannel
str
Channel ID as in the TOML file.
requiredstereo
bool
Whether to convert coordinates to stereotaxic coordinates. Default is False.
False
Returns:
Type Descriptionx, y, z : floats
Injection site coordinates.
Source code incuisto/utils.py
def get_injection_site(\n animal: str, info_file: str, channel: str, stereo: bool = False\n) -> tuple:\n \"\"\"\n Get the injection site coordinates associated with animal.\n\n Parameters\n ----------\n animal : str\n Animal ID.\n info_file : str\n Path to TOML info file.\n channel : str\n Channel ID as in the TOML file.\n stereo : bool, optional\n Wether to convert coordinates in stereotaxis coordinates. Default is False.\n\n Returns\n -------\n x, y, z : floats\n Injection site coordinates.\n\n \"\"\"\n with open(info_file, \"rb\") as fid:\n info = tomllib.load(fid)\n\n if channel in info[animal]:\n x, y, z = info[animal][channel][\"injection_site\"]\n if stereo:\n x, y, z = ccf_to_stereo(x, y, z)\n else:\n x, y, z = None, None, None\n\n return x, y, z\n
"},{"location":"api-utils.html#cuisto.utils.get_leaves_list","title":"get_leaves_list(atlas)
","text":"Get the list of leaf brain regions.
Leaf brain regions are defined as regions without children, eg. regions that are at the bottom of the hierarchy.
Parameters:
Name Type Description Defaultatlas
BrainGlobeAtlas
Atlas to extract regions from.
requiredReturns:
Name Type Descriptionleaves_list
list
Acronyms of leaf brain regions.
Source code incuisto/utils.py
def get_leaves_list(atlas: BrainGlobeAtlas) -> list:\n \"\"\"\n Get the list of leaf brain regions.\n\n Leaf brain regions are defined as regions without childs, eg. regions that are at\n the bottom of the hiearchy.\n\n Parameters\n ----------\n atlas : BrainGlobeAtlas\n Atlas to extract regions from.\n\n Returns\n -------\n leaves_list : list\n Acronyms of leaf brain regions.\n\n \"\"\"\n leaves_list = []\n for region in atlas.structures_list:\n if atlas.structures.tree[region[\"id\"]].is_leaf():\n leaves_list.append(region[\"acronym\"])\n\n return leaves_list\n
"},{"location":"api-utils.html#cuisto.utils.get_mapping_fusion","title":"get_mapping_fusion(fusion_file)
","text":"Get mapping dictionnary between input brain regions and new regions defined in atlas_fusion.toml
file.
The returned dictionary can be used in DataFrame.replace().
Parameters:
Name Type Description Defaultfusion_file
str
Path to the TOML file with the merging rules.
requiredReturns:
Name Type Descriptionm
dict
Mapping as {old: new}.
Source code incuisto/utils.py
def get_mapping_fusion(fusion_file: str) -> dict:\n \"\"\"\n Get mapping dictionary between input brain regions and new regions defined in\n `atlas_fusion.toml` file.\n\n The returned dictionary can be used in DataFrame.replace().\n\n Parameters\n ----------\n fusion_file : str\n Path to the TOML file with the merging rules.\n\n Returns\n -------\n m : dict\n Mapping as {old: new}.\n\n \"\"\"\n with open(fusion_file, \"rb\") as fid:\n df = pd.DataFrame.from_dict(tomllib.load(fid), orient=\"index\").set_index(\n \"acronym\"\n )\n\n return (\n df.drop(columns=\"name\")[\"members\"]\n .explode()\n .reset_index()\n .set_index(\"members\")\n .to_dict()[\"acronym\"]\n )\n
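As a sketch, with the atlas_fusion.toml example shown in the configuration files page, the returned mapping contains entries such as :
from cuisto import utils\n\nmapping = utils.get_mapping_fusion(\"atlas_fusion.toml\")\n# {\"NR\": \"PHY\", \"PRP\": \"PHY\", \"AMBd\": \"AMB\", \"AMBv\": \"AMB\", ...}\n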
"},{"location":"api-utils.html#cuisto.utils.get_starter_cells","title":"get_starter_cells(animal, channel, info_file)
","text":"Get the number of starter cells associated with animal.
Parameters:
Name Type Description Defaultanimal
str
Animal ID.
requiredchannel
str
Channel ID.
requiredinfo_file
str
Path to TOML info file.
requiredReturns:
Name Type Descriptionn_starters
int
Number of starter cells.
Source code incuisto/utils.py
def get_starter_cells(animal: str, channel: str, info_file: str) -> int:\n \"\"\"\n Get the number of starter cells associated with animal.\n\n Parameters\n ----------\n animal : str\n Animal ID.\n channel : str\n Channel ID.\n info_file : str\n Path to TOML info file.\n\n Returns\n -------\n n_starters : int\n Number of starter cells.\n\n \"\"\"\n with open(info_file, \"rb\") as fid:\n info = tomllib.load(fid)\n\n return info[animal][channel][\"starter_cells\"]\n
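A usage sketch, with values matching the infos_template.toml example of the configuration files page :
from cuisto import utils\n\nn_starters = utils.get_starter_cells(\"animalid0\", \"marker+\", \"infos_template.toml\")\nprint(n_starters)  # 150 with the template file\n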
"},{"location":"api-utils.html#cuisto.utils.merge_regions","title":"merge_regions(df, col, fusion_file)
","text":"Merge brain regions following rules in the fusion_file.toml
file.
Apply this merging on the col
column of the input DataFrame. Values of col
found in a members
section of the file will be changed to the new acronym.
Parameters:
Name Type Description Defaultdf
DataFrame
required col
str
Column of df
on which to apply the mapping.
fusion_file
str
Path to the toml file with the merging rules.
requiredReturns:
Name Type Descriptiondf
DataFrame
Same DataFrame with regions renamed.
Source code incuisto/utils.py
def merge_regions(df: pd.DataFrame, col: str, fusion_file: str) -> pd.DataFrame:\n \"\"\"\n Merge brain regions following rules in the `fusion_file.toml` file.\n\n Apply this merging on `col` of the input DataFrame. Values of `col` found in a\n `members` section of the file will be changed to the new acronym.\n\n Parameters\n ----------\n df : pandas.DataFrame\n col : str\n Column of `df` on which to apply the mapping.\n fusion_file : str\n Path to the toml file with the merging rules.\n\n Returns\n -------\n df : pandas.DataFrame\n Same DataFrame with regions renamed.\n\n \"\"\"\n df[col] = df[col].replace(get_mapping_fusion(fusion_file))\n\n return df\n
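A usage sketch (the \"Name\" column is hypothetical, use whichever column of your DataFrame holds the regions acronyms) :
import pandas as pd\n\nfrom cuisto import utils\n\ndf = pd.DataFrame({\"Name\": [\"AMBd\", \"AMBv\", \"NTSm\", \"PAG\"]})\ndf = utils.merge_regions(df, \"Name\", \"atlas_fusion.toml\")\n# with the example atlas_fusion.toml, \"AMBd\" and \"AMBv\" become \"AMB\",\n# \"NTSm\" becomes \"NTS\" and \"PAG\" is left untouched\n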
"},{"location":"api-utils.html#cuisto.utils.renormalize_per_key","title":"renormalize_per_key(df, by, on)
","text":"Renormalize on
column by its sum for each by
.
Use case : relative density is computed for both hemispheres, so if one wants to plot only one hemisphere, the sum of the bars corresponding to one channel (by
) should be 1. So :
df = df[df[\"hemisphere\"] == \"Ipsi.\"] df = renormalize_per_key(df, \"channel\", \"relative density\") Then, the sum of \"relative density\" for each \"channel\" equals 1.
Parameters:
Name Type Description Defaultdf
DataFrame
required by
str
Key in df
. df
is normalized for each by
.
on
str
Key in df
. Measurement to be normalized.
Returns:
Name Type Descriptiondf
DataFrame
Same DataFrame with normalized on
column.
cuisto/utils.py
def renormalize_per_key(df: pd.DataFrame, by: str, on: str):\n \"\"\"\n Renormalize `on` column by its sum for each `by`.\n\n Use case : relative density is computed for both hemispheres, so if one wants to\n plot only one hemisphere, the sum of the bars corresponding to one channel (`by`)\n should be 1. So :\n >>> df = df[df[\"hemisphere\"] == \"Ipsi.\"]\n >>> df = renormalize_per_key(df, \"channel\", \"relative density\")\n Then, the sum of \"relative density\" for each \"channel\" equals 1.\n\n Parameters\n ----------\n df : pd.DataFrame\n by : str\n Key in `df`. `df` is normalized for each `by`.\n on : str\n Key in `df`. Measurement to be normalized.\n\n Returns\n -------\n df : pd.DataFrame\n Same DataFrame with normalized `on` column.\n\n \"\"\"\n norm = df.groupby(by)[on].sum()\n bys = df[by].unique()\n for key in bys:\n df.loc[df[by] == key, on] = df.loc[df[by] == key, on].divide(norm[key])\n\n return df\n
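A small numeric sketch of what this does, using the channel names of the configuration template :
import pandas as pd\n\nfrom cuisto import utils\n\ndf = pd.DataFrame(\n    {\n        \"channel\": [\"marker+\", \"marker+\", \"marker-\", \"marker-\"],\n        \"relative density\": [0.2, 0.3, 0.1, 0.3],\n    }\n)\ndf = utils.renormalize_per_key(df, \"channel\", \"relative density\")\nprint(df.groupby(\"channel\")[\"relative density\"].sum())  # 1.0 for each channel\n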
"},{"location":"api-utils.html#cuisto.utils.select_hemisphere_channel","title":"select_hemisphere_channel(df, hue, hue_filter, hue_mirror)
","text":"Select relevant data given hue and filters.
Returns the DataFrame with only things to be used.
Parameters:
Name Type Description Defaultdf
DataFrame
DataFrame to filter.
requiredhue
(hemisphere, channel)
hue that will be used in seaborn plots.
\"hemisphere\"
hue_filter
str
Selected data.
requiredhue_mirror
bool
Instead of keeping only hue_filter values, they will be plotted in mirror.
requiredReturns:
Name Type Descriptiondfplt
DataFrame
DataFrame to be used in plots.
Source code incuisto/utils.py
def select_hemisphere_channel(\n df: pd.DataFrame, hue: str, hue_filter: str, hue_mirror: bool\n) -> pd.DataFrame:\n \"\"\"\n Select relevant data given hue and filters.\n\n Returns the DataFrame with only things to be used.\n\n Parameters\n ----------\n df : pd.DataFrame\n DataFrame to filter.\n hue : {\"hemisphere\", \"channel\"}\n hue that will be used in seaborn plots.\n hue_filter : str\n Selected data.\n hue_mirror : bool\n Instead of keeping only hue_filter values, they will be plotted in mirror.\n\n Returns\n -------\n dfplt : pd.DataFrame\n DataFrame to be used in plots.\n\n \"\"\"\n dfplt = df.copy()\n\n if hue == \"hemisphere\":\n # hue_filter is used to select channels\n # keep only left and right hemispheres, not \"both\"\n dfplt = dfplt[dfplt[\"hemisphere\"] != \"both\"]\n if hue_filter == \"all\":\n hue_filter = dfplt[\"channel\"].unique()\n elif not isinstance(hue_filter, (list, tuple)):\n # it is allowed to select several channels so handle lists\n hue_filter = [hue_filter]\n dfplt = dfplt[dfplt[\"channel\"].isin(hue_filter)]\n elif hue == \"channel\":\n # hue_filter is used to select hemispheres\n # it can only be left, right, both or empty\n if hue_filter == \"both\":\n # handle if it's a coordinates DataFrame which doesn't have \"both\"\n if \"both\" not in dfplt[\"hemisphere\"].unique():\n # keep both hemispheres, don't do anything\n pass\n else:\n if hue_mirror:\n # we need to keep both hemispheres to plot them in mirror\n dfplt = dfplt[dfplt[\"hemisphere\"] != \"both\"]\n else:\n # we keep the metrics computed in both hemispheres\n dfplt = dfplt[dfplt[\"hemisphere\"] == \"both\"]\n else:\n # hue_filter should correspond to an hemisphere name\n dfplt = dfplt[dfplt[\"hemisphere\"] == hue_filter]\n else:\n # not handled. Just return the DataFrame without filtering, maybe it'll make\n # sense.\n warnings.warn(f\"{hue} should be 'channel' or 'hemisphere'.\")\n\n # check result\n if len(dfplt) == 0:\n warnings.warn(\n f\"hue={hue} and hue_filter={hue_filter} resulted in an empty subset.\"\n )\n\n return dfplt\n
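For instance, to keep the metrics of all channels in the left hemisphere only (a sketch, assuming df has \"hemisphere\" and \"channel\" columns) :
from cuisto import utils\n\ndfplt = utils.select_hemisphere_channel(df, \"channel\", \"Left\", False)\n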
"},{"location":"guide-create-pyramids.html","title":"Create pyramidal OME-TIFF","text":"This page will guide you to use the pyramid-creator
package, in the event the CZI file does not work directly in QuPath. The script will generate pyramids from OME-TIFF files exported from ZEN.
Tip
pyramid-creator
can also pyramidalize images using Python only with the --no-use-qupath
option.
This Python script uses QuPath under the hood, via a companion script called createPyramids.groovy
. It will find the OME-TIFF files and make QuPath run the groovy script on it, in console mode (without graphical user interface).
This script is standalone, i.e. it does not rely on the cuisto
package. But installing the latter makes sure all dependencies are installed (namely typer
and tqdm
with the QuPath backend and quite a few more for the Python backend).
pyramid-creator
moved to a standalone package that you can find here with installation and usage instructions.
You will find instructions on the dedicated project page over at Github.
For reference :
You will need conda
, follow those instructions to install it.
Then, create a virtual environment if you didn't already (pyramid-creator
can be installed in the environment for cuisto
) and install the pyramid-creator
package.
conda create -c conda-forge -n cuisto-env python=3.12 # not required if you already created an environment\nconda activate cuisto-env\npip install pyramid-creator\n
To use the Python backend (with tifffile
), replace the last line with : pip install pyramid-creator[python-backend]\n
To use the QuPath backend, a working QuPath installation is required, and the pyramid-creator
command needs to be aware of its location. To do so, first, install QuPath. By default, it will install in ~\\AppData\\QuPath-0.X.Y
. In any case, note down the installation location.
Then, you have several options : - Create a file in your user directory called \"QUPATH_PATH\" (without extension), containing the full path to the QuPath console executable. In my case, it reads : C:\\Users\\glegoc\\AppData\\Local\\QuPath-0.5.1\\QuPath-0.5.1 (console).exe
. Then, the pyramid-creator
script will read this file to find the QuPath executable. - Specify the QuPath path as an option when calling the command line interface (see the Usage section) :
pyramid-creator /path/to/your/images --qupath-path \"C:\\Users\\glegoc\\AppData\\Local\\QuPath-0.5.1\\QuPath-0.5.1 (console).exe\"\n
- Specify the QuPath path as an option when using the package in a Python script (see the Usage section) : from pyramid_creator import pyramidalize_directory\npyramidalize_directory(\"/path/to/your/images/\", qupath_path=\"C:\\Users\\glegoc\\AppData\\Local\\QuPath-0.5.1\\QuPath-0.5.1 (console).exe\")\n
- If you're using Windows, using QuPath v0.6.0, v0.5.1 or v0.5.0 and chose the default installation location, pyramid-creator
should find it automatically and write it down in the \"QUPATH_PATH\" file by itself."},{"location":"guide-create-pyramids.html#export-czi-to-ome-tiff","title":"Export CZI to OME-TIFF","text":"OME-TIFF is a specification of the TIFF image format. It specifies how the metadata should be written to the file to be interoperable between softwares. ZEN can export to OME-TIFF so you don't need to pay attention to metadata. Therefore, you won't need to specify pixel size and channels names and colors as it will be read directly from the OME-TIFF files.
The OME-TIFF files should be ready to be pyramidalized with the create_pyramids.py
script.
See the instructions on the dedicated project page over at Github.
"},{"location":"guide-install-abba.html","title":"Install ABBA","text":"You can head to the ABBA documentation for installation instructions. You'll see that a Windows installer is available. While it might be working great, I prefer to do it manually step-by-step to make sure everything is going well.
You will find below installation instructions for the regular ABBA Fiji plugin, which proposes only the mouse and rat brain atlases. To be able to use the Brainglobe atlases, you will need the Python version. The two can be installed alongside each other.
"},{"location":"guide-install-abba.html#abba-fiji","title":"ABBA Fiji","text":""},{"location":"guide-install-abba.html#install-fiji","title":"Install Fiji","text":"Install the \"batteries-included\" distribution of ImageJ, Fiji, from the official website.
Warning
Extract Fiji somewhere you have write access, otherwise Fiji will not be able to download and install plugins. In other words, put the folder in your User directory and not in C:\\, C:\\Program Files and the like.
We need to add the PTBIOP update site, managed by the bio-imaging and optics facility at EPFL, that contains the ABBA plugin.
Help > Update
... Manage Update Sites
. Look up PTBIOP
, and click on the check box. Apply and Close
, and Apply Changes
. This will download and install the required plugins. Restart ImageJ as suggested. Plugins > BIOP > Atlas > ABBA - ABBA start
, or simply type abba start
in the search box. Choose the \"Adult Mouse Brain - Allen Brain Atlas V3p1\". It will download this atlas and might take a while, depending on your Internet connection.ABBA can leverage the elastix toolbox for automatic 2D in-plane registration.
ABBA should be installed and functional ! You can check the official documentation for usage instructions and some tips here.
"},{"location":"guide-install-abba.html#abba-python","title":"ABBA Python","text":"Brainglobe is an initiative aiming at providing interoperable, model-agnostic Python-based tools for neuroanatomy. They package various published volumetric anatomical atlases of different species (check the list), including the Allen Mouse brain atlas (CCFv3, ref.) and a 3D version of the Allen mouse spinal cord atlas (ref).
To be able to leverage those atlases, we need to make ImageJ and Python be able to talk to each other. This is the purpose of abba_python, that will install ImageJ and its ABBA plugins inside a python environment, with bindings between the two worlds.
"},{"location":"guide-install-abba.html#install-conda","title":"Installconda
","text":"If not done already, follow those instructions to install conda
.
conda create -c conda-forge -n abba_python python=3.10 openjdk=11 maven pyimagej notebook\n
pip install abba-python==0.9.6.dev0\n
conda activate abba_python\n
brainglobe install -a allen_cord_20um\n
ipython\n
You should see the IPython prompt, that looks like this : In [1]:\n
from abba_python import abba\nabba.start_imagej()\n
The first launch needs to initialize ImageJ and install all required plugins, which takes a while (>5min).Tip
Afterwards, to launch ImageJ from Python and do some registration work, you just need to launch a terminal (PowerShell), and do steps 4., 6., and 7.
"},{"location":"guide-install-abba.html#install-the-automatic-registration-tools_1","title":"Install the automatic registration tools","text":"You can follow the same instructions as the regular Fiji version. You can do it from either the \"normal\" Fiji or the ImageJ instance launched from Python, they share the same configuration files. Therefore, if you already did it in regular Fiji, elastix should already be set up and ready to use in ImageJ from Python.
"},{"location":"guide-install-abba.html#troubleshooting","title":"Troubleshooting","text":""},{"location":"guide-install-abba.html#java_home-errors","title":"JAVA_HOME errors","text":"Unfortunately on some computers, Python does not find the Java virtual machine even though it should have been installed when installing OpenJDK with conda. This will result in an error mentionning \"java.dll\" and suggesting to check the JAVA_HOME
environment variable.
The only fix I could find is to install Java system-wide. You can grab a (free) installer on Adoptium, choosing JRE 17.X for your platform. During the installation :
Restart the terminal and try again. Now, ImageJ should use the system-wide Java and it should work.
"},{"location":"guide-install-abba.html#abba-qupath-extension","title":"ABBA QuPath extension","text":"To import registered regions in your QuPath project and be able to convert objects' coordinates in atlas space, the ABBA QuPath extension is required.
Edit > Preferences
. In the Extension
tab, set your QuPath user directory
to a local directory (usually C:\\Users\\USERNAME\\QuPath\\v0.X.Y
).extensions
in your QuPath user directory.qupath-extension-abba-x.y.z.zip
).extensions
folder in your QuPath user directory.Extensions
, you should have an ABBA
entry.While you can use QuPath and cuisto
functionalities as you see fit, there exists a pipeline version of those. It requires a specific structure to store files (so that the different scripts know where to look for data). It also requires that you have detections stored as geojson files, which can be achieved using a pixel classifier and further segmentation (see here) for example.
This is especially useful to perform quantification for several animals at once, where you'll only need to specify the root directory and the animals identifiers that should be pooled together, instead of having to manually specify each detections and annotations files.
Three main scripts and function are used within the pipeline :
exportPixelClassifierProbabilities.groovy
to create prediction maps of objects of interestsegment_image.py
to segment those maps and create geojson files to be imported back to QuPath as detectionspipelineImportExport.groovy
to :$folderPrefix$segmentation/$segTag$/geojson
)Following a specific directory structure ensures subsequent scripts and functions can find required files. The good news is that this structure will mostly be created automatically using the segmentation scripts (from QuPath and Python), as long as you stay consistent filling the parameters of each script. The structure expected by the groovy all-in-one script and cuisto
batch-process function is the following :
some_directory/\n \u251c\u2500\u2500AnimalID0/ \n \u2502 \u251c\u2500\u2500 animalid0_qupath/\n \u2502 \u2514\u2500\u2500 animalid0_segmentation/ \n \u2502 \u2514\u2500\u2500 segtag/ \n \u2502 \u251c\u2500\u2500 annotations/ \n \u2502 \u251c\u2500\u2500 detections/ \n \u2502 \u251c\u2500\u2500 geojson/ \n \u2502 \u2514\u2500\u2500 probabilities/ \n \u251c\u2500\u2500AnimalID1/ \n \u2502 \u251c\u2500\u2500 animalid1_qupath/\n \u2502 \u2514\u2500\u2500 animalid1_segmentation/ \n \u2502 \u2514\u2500\u2500 segtag/ \n \u2502 \u251c\u2500\u2500 annotations/ \n \u2502 \u251c\u2500\u2500 detections/ \n \u2502 \u251c\u2500\u2500 geojson/ \n \u2502 \u2514\u2500\u2500 probabilities/ \n
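This structure is normally created by the scripts themselves, but if you want to pre-create it manually, here is a sketch with pathlib (names taken from the example above; segtag is whatever you set in the segmentation scripts) :
from pathlib import Path\n\nroot = Path(\"/path/to/some_directory\")\nanimals = [\"AnimalID0\", \"AnimalID1\"]\nsegtag = \"cells\"  # type of segmentation, must match the scripts' parameters\n\nfor animal in animals:\n    base = root / animal / f\"{animal.lower()}_segmentation\" / segtag\n    for sub in (\"annotations\", \"detections\", \"geojson\", \"probabilities\"):\n        (base / sub).mkdir(parents=True, exist_ok=True)\n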
Info
Except the root directory and the QuPath project, the rest is automatically created based on the parameters provided in the different scripts. Here's the description of the structure and the requirements :
animalid0
should be a convenient animal identifier.AnimalID0
, can be anything but should correspond to one and only one animal.animalid0
should be lower case.animalid0_qupath
can be named as you wish in practice, but should be the QuPath project.animalid0_segmentation
should be called exactly like this -- replacing animalid0
with the actual animal ID. It will be created automatically with the exportPixelClassifierProbabilities.groovy
script.segtag
corresponds to the type of segmentation (cells, fibers...). It is specified in the exportPixelClassifierProbabilities
script. It could be anything, but to recognize if the objects are polygons (and should be counted per regions) or polylines (and the cumulated length should be measured), there are some hardcoded keywords in the segment_images.py
and pipelineImportExport.groovy
scripts :cells
, cell
, polygons
, polygon
synapto
, synaptophysin
, syngfp
, boutons
, points
fibers
, fiber
, axons
, axon
annotations
contains the atlas regions measurements as TSV files.detections
contains the objects atlas coordinates and measurements as CSV files (for punctal objects) or JSON (for polylines objects).geojson
contains objects stored as geojson files. They could be generated with the pixel classifier prediction map segmentation.probabilities
contains the prediction maps to be segmented by the segment_images.py
script.Tip
You can see an example minimal directory structure with only annotations stored in resources/multi
.
Tip
Remember that this is merely an example pipeline, you can shortcut it at any points, as long as you end up with TSV files following the requirements for cuisto
.
exportPixelClassifierProbabilities.groovy
script. You need to get a pixel classifier or create one.segment_images.py
script to generate the geojson files containing the objects of interest.pipelineImportExport.groovy
script on your QuPath project.import cuisto\n\n# Parameters\nwdir = \"/path/to/some_directory\"\nanimals = [\"AnimalID0\", \"AnimalID1\"]\nconfig_file = \"/path/to/your/config.toml\"\noutput_format = \"h5\" # to save the quantification values as hdf5 file\n\n# Processing\ncfg = cuisto.Config(config_file)\ndf_regions, dfs_distributions, df_coordinates = cuisto.process.process_animals(\n wdir, animals, cfg, out_fmt=output_format\n)\n\n# Display\ncuisto.display.plot_regions(df_regions, cfg)\ncuisto.display.plot_1D_distributions(dfs_distributions, cfg, df_coordinates=df_coordinates)\ncuisto.display.plot_2D_distributions(df_coordinates, cfg)\n
Tip
You can see a live example in this demo notebook.
"},{"location":"guide-prepare-qupath.html","title":"Prepare QuPath data","text":"cuisto
uses some QuPath classifications concepts, make sure to be familiar with them with the official documentation. Notably, we use the concept of primary classification and derived classification : an object classfied as First: second
is of classification First
and of derived classification second
.
cuisto
assumes a specific way of storing regions and objects information in the TSV files exported from QuPath. Note that only one primary classification is supported, but you can have any number of derived classifications.
Detections are the objects of interest. Their information must respect the following :
Atlas_X
, Atlas_Y
, Atlas_Z
. They correspond, respectively, to the anterio-posterior (rostro-caudal) axis, the inferio-superior (dorso-ventral) axis and the left-right (medio-lateral) axis.Primary: second
. Primary would be an object type (cells, fibers, ...), the second one would be a biological marker or a detection channel (fluorescence channel name), for instance : Cells: some marker
, or Fibers: EGFP
.Annotations correspond to the atlas regions. Their information must respect the following :
Hemisphere: acronym
(for ex. Left: PAG
).Primary classification: derived classification measurement name
. For instance : Cells: some marker Count
.Fibers: EGFP Length \u00b5m
.cuisto
","text":"While you're free to add any measurements as long as they follow the requirements, keep in mind that for atlas regions quantification, cuisto
will only compute, pool and average the following metrics :
It is then up to you to select which metrics among those to compute and display and name them, via the configuration file.
For punctal detections (eg. objects whose only the centroid is considered), only the atlas coordinates are used, to compute and display spatial distributions of objects across the brain (using their classifications to give each distributions different hues). For fibers-like objects, it requires to export the lines detections atlas coordinates as JSON files, with the exportFibersAtlasCoordinates.groovy
script (this is done automatically when using the pipeline).
The groovy script under scripts/qupath-utils/measurements/addRegionsCount.groovy
will add a properly formatted count of objects of selected classifications in all atlas regions. This is used for punctual objects (polygons or points), for example objects created in QuPath or with the segmentation script.
The groovy script under scripts/qupath-utils/measurements/addRegionsLength.groovy
will add the properly formatted cumulated lenghth in microns of fibers-like objects in all atlas regions. This is used for polylines objects, for example generated with the segmentation script.
Keeping in mind cuisto
limitations, you can add any measurements you'd like.
For example, you can run a pixel classifier in all annotations (eg. atlas regions). Using the Measure
button, it will add a measurement of the area covered by classified pixels. Then, you can use the script located under scripts/qupath-utils/measurements/renameMeasurements.groovy
to rename the generated measurements with a properly-formatted name. Finally, you can export regions measurements.
Since cuisto
will compute a \"density\", eg. the measurement divided by the region area, in this case, it will correspond to the fraction of surface occupied by classified pixels. This is showcased in the Examples.
Once you imported atlas regions registered with ABBA, detected objects in your images and added properly formatted measurements to detections and annotations, you can :
Measure > Export measurements
Output file
(specify in the file name if it is a detections or annotations file)Detections
or Annoations
in Export type
Export
Do this for both Detections and Annotations, you can then use those files with cuisto
(see the Examples).
The QuPath documentation is quite extensive, detailed, very well explained and contains full guides on how to create a QuPath project and how to find objects of interests. It is therefore a highly recommended read, nevertheless, you will find below some quick reminders.
"},{"location":"guide-qupath-objects.html#qupath-project","title":"QuPath project","text":"QuPath works with projects. It is basically a folder with a main project.qproj
file, which is a JSON file that contains all the data about your images except the images themselves. Algonside, there is a data
folder with an entry for each image, that stores the thumbnails, metadata about the image and detections and annotations but, again, not the image itself. The actual images can be stored anywhere (including a remote server), the QuPath project merely contains the information needed to fetch them and display them. QuPath will never modify your image data.
This design makes the QuPath project itself lightweight (should never exceed 500MB even with millions of detections), and portable : upon opening, if QuPath is not able to find the images where they should be, it will ask for their new locations.
Tip
It is recommended to create the QuPath project locally on your computer, to avoid any risk of conflicts if two people open it at the same time. Nevertheless, you should backup the project regularly on a remote server.
To create a new project, simply drag & drop an empty folder into QuPath window and accept to create a new empty project. Then, add images :
Add images
, then Choose files
on the bottom. Drag & drop does not really work as the images will not be sorted properly.Then, choose the following options :
Image server
Default (let QuPath decide)
Set image type
Most likely, fluorescence
Rotate image
No rotation (unless all your images should be rotated)
Optional args
Leave empty
Auto-generate pyramids
Uncheck
Import objects
Uncheck
Show image selector
Might be useful to check if the images are read correctly (mostly for CZI files).
"},{"location":"guide-qupath-objects.html#detect-objects","title":"Detect objects","text":""},{"location":"guide-qupath-objects.html#built-in-cell-detection","title":"Built-in cell detection","text":"QuPath has a built-in cell detection feature, available in Analyze > Cell detection
. You hava a full tutorial in the official documentation.
Briefly, this uses a watershed algorithm to find bright spots and can perform a cell expansion to estimate the full cell shape based on the detected nuclei. Therefore, this works best to segment nuclei but one can expect good performance for cells as well, depending on the imaging and staining conditions.
Tip
In scripts/qupath-utils/segmentation
, there is watershedDetectionFilters.groovy
which uses this feature from a script. It further allows you to filter out detected cells based on shape measurements as well as fluorescence itensity in several channels and cell compartments.
Another very powerful and versatile way to segment cells if through machine learning. Note the term \"machine\" and not \"deep\" as it relies on statistics theory from the 1980s. QuPath provides an user-friendly interface to that, similar to what ilastik provides.
The general idea is to train a model to classify every pixel as a signal or as background. You can find good resources on how to procede in the official documentation and some additionnal tips and tutorials on Michael Neslon's blog (here and here).
Specifically, you will manually annotate some pixels of objects of interest and background. Then, you will apply some image processing filters (gaussian blur, laplacian...) to reveal specific features in your images (shapes, textures...). Finally, the pixel classifier will fit a model on those pixel values, so that it will be able to predict if a pixel, given the values with the different filters you applied, belongs to an object of interest or to the background.
This is done in an intuitive GUI with live predictions to get an instant feedback on the effects of the filters and manual annotations.
"},{"location":"guide-qupath-objects.html#train-a-model","title":"Train a model","text":"First and foremost, you should use a QuPath project dedicated to the training of a pixel classifier, as it is the only way to be able to edit it later on.
Classify > Pixel classification > Train pixel classifier
, and turn on Live prediction
.Load training
.Advanced settings
, check Reweight samples
to help make sure a classification is not over-represented.Classifier
: typically, RTrees
or ANN_MLP
. This can be changed dynamically afterwards to see which works best for you.Resolution
: this is the pixel size used. This is a trade-off between accuracy and speed. If your objects are only composed of a few pixels, you'll the full resolution, for big objects reducing the resolution will be faster.Features
: this is the core of the process -- where you choose the filters. In Edit
, you'll need to choose :Output
:Classification
: QuPath will directly classify the pixels. Use that to create objects directly from the pixel classifier within QuPath.Probability
: this will output an image where each pixel is its probability to belong to each of the classifications. This is useful to create objects externally.Show classification
once you begin to make annotations.Begin to annotate ! Use the Polyline annotation tool (V) to classify some pixels belonging to an object and some pixels belonging to the background across your images.
Tip
You can select the RTrees
Classifier, then Edit
: check the Calculate variable importance
checkbox. Then in the log (Ctrl+Shift+L), you can inspect the weight each features have. This can help discard some filters to keep only the ones most efficient to distinguish the objects of interest.
See in live the effect of your annotations on the classification using C and continue until you're satisfied.
Important
This is machine learning. The lesser annotations, the better, as this will make your model more general and adapt to new images. The goal is to find the minimal number of annotations to make it work.
Once you're done, give your classifier a name in the text box in the bottom and save it. It will be stored as a JSON file in the classifiers
folder of the QuPath project. This file can be imported in your other QuPath projects.
Once you imported your model JSON file (Classify > Pixel classification > Load pixel classifier
, three-dotted menu and Import from file
), you can create objects out of it, measure the surface occupied by classified pixels in each annotation or classify existing detections based on the prediction at their centroid.
In scripts/qupath-utils/segmentation
, there is a createDetectionsFromPixelClassifier.groovy
script to batch-process your project.
Alternatively, a Python script provided with cuisto
can be used to segment the probability map generated by the pixel classifier (the script is located in scripts/segmentation
).
You will first need to export those with the exportPixelClassifierProbabilities.groovy
script (located in scripts/qupath-utils
).
Then the segmentation script can :
Several parameters have to be specified by the user, see the segmentation script API reference. This script will generate GeoJson files that can be imported back to QuPath with the importGeojsonFiles.groovy
script.
QuPath being open-source and extensible, there are third-party extensions that implement popular deep learning segmentation algorithms directly in QuPath. They can be used to find objects of interest as detections in the QuPath project and thus integrate nicely with cuisto
to quantify them afterwards.
QuPath extension : https://github.com/qupath/qupath-extension-instanseg Original repository : https://github.com/instanseg/instanseg Reference papers : doi:10.48550/arXiv.2408.15954, doi:10.1101/2024.09.04.611150
"},{"location":"guide-qupath-objects.html#stardist","title":"Stardist","text":"QuPath extension : https://github.com/qupath/qupath-extension-stardist Original repository : https://github.com/stardist/stardist Reference paper : doi:10.48550/arXiv.1806.03535
There is a stardistDetectionFilter.groovy
script in scripts/qupath-utils/segmentation
to use it from a script which further allows you to filter out detected cells based on shape measurements as well as fluorescence itensity in several channels and cell compartments.
QuPath extension : https://github.com/BIOP/qupath-extension-cellpose Original repository : https://github.com/MouseLand/cellpose Reference papers : doi:10.1038/s41592-020-01018-x, doi:10.1038/s41592-022-01663-4, doi:10.1101/2024.02.10.579780
There is a cellposeDetectionFilter.groovy
script in scripts/qupath-utils/segmentation
to use it from a script which further allows you to filter out detected cells based on shape measurements as well as fluorescence itensity in several channels and cell compartments.
QuPath extension : https://github.com/ksugar/qupath-extension-sam Original repositories : samapi, SAM Reference papers : doi:10.1101/2023.06.13.544786, doi:10.48550/arXiv.2304.02643
This is more an interactive annotation tool than a fully automatic segmentation algorithm.
"},{"location":"guide-register-abba.html","title":"Registration with ABBA","text":"The ABBA documentation is quite extensive and contains guided tutorials and a video tutorial. You should therefore check it out ! Nevertheless, you will find below some quick reminders.
"},{"location":"guide-register-abba.html#import-a-qupath-project","title":"Import a QuPath project","text":"Always use ABBA with a QuPath project, if you import the images directly it will not be possible to export the results back to QuPath. In the toolbar, head to Import > Import QuPath Project
.
Warning
ABBA is not the most stable software, it is highly recommended to save in a different file each time you do anything.
"},{"location":"guide-register-abba.html#navigation","title":"Navigation","text":""},{"location":"guide-register-abba.html#interface","title":"Interface","text":"In the right panel, there is everything related to the images, both yours and the atlas.
In the Atlas Display
section, you can turn on and off different channels (the first is the reference image, the last is the regions outlines). The Displayed slicing [atlas steps]
slider can increase or decrease the number of displayed 2D slices extracted from the 3D volume. It is comfortable to set to to the same spacing as your slices. Remember it is in \"altas steps\", so for an atlas imaged at 10\u00b5m, a 120\u00b5m spacing corresponds to 12 atlas steps.
The Slices Display
section lists all your slices. Ctrl+A to select all, and click on the Vis.
header to make them visible. Then, you can turn on and off each channels (generally the NISSL channel and the ChAT channel will be used) by clicking on the corresponding header. Finally, set the display limits clicking on the empty header containing the colors.
Right Button in the main view to Change overlap mode
twice to get the slices right under the atlas slices.
Tip
Every action in ABBA are stored and are cancellable with Right Button+Z, except the Interactive transform.
"},{"location":"guide-register-abba.html#find-position-and-angle","title":"Find position and angle","text":"This is the hardest task. You need to drag the slices along the rostro-caudal axis and modify the virtual slicing angle (X Rotation [deg]
and Y Rotation [deg]
sliders at the bottom of the right panel) until you match the brain structures observed in both your images and the atlas.
Tip
With a high number of slices, most likely, it will be impossible to find a position and slicing angle that works for all your slices. In that case, you should procede in batch, eg. sub-stack of images with a unique position and slicing angle that works for all images in the sub-stack. Then, remove the remaining slices (select them, Right Button > Remove Selected Slices
), but do not remove them from the QuPath project.
Procede as usual, including saving (note the slices range it corresponds to) and exporting the registration back to QuPath. Then, reimport the project in a fresh ABBA instance, remove the slices that were already registered and redo the whole process with the next sub-stack and so on.
Once you found the correct position and slicing angle, it must not change anymore, otherwise the registration operations you perform will not make any sense anymore.
"},{"location":"guide-register-abba.html#in-plane-registration","title":"In-plane registration","text":"The next step is to deform your slices to match the corresponding atlas image, extracted from the 3D volume given the position and virtual slicing angle defined at the previous step.
Info
ABBA makes the choice to deform your slices to the atlas, but the transformations are invertible. This means that you will still be able to work on your raw data and deform the altas onto it instead.
In image processing, there are two kinds of deformation one can apply on an image :
Both can be applied manually or automatically (if the imaging quality allows it). You have different tools to achieve this, all of which can be combined in any order, except the Interactive transform tool (coarse, linear manual deformation).
Change the overlap mode (Right Button) to overlay the slice onto the atlas regions borders. Select the slice you want to align.
"},{"location":"guide-register-abba.html#coarse-linear-manual-deformation","title":"Coarse, linear manual deformation","text":"While not mandatory, if this tool shall be used, it must be before any operation as it is not cancellable. Head to Register > Affine > Interactive transform
. This will open a box where you can rotate, translate and resize the image to make a first, coarse alignment.
Close the box. Again, this is not cancellable. Afterwards, you're free to apply any numbers of transformations in any order.
"},{"location":"guide-register-abba.html#automatic-registration","title":"Automatic registration","text":"This uses the elastix toolbox to compute the transformations needed to best match two images. It is available in both affine and spline mode, in the Register > Affine
and Register > Spline
menus respectively.
In both cases, it will open a dialog where you need to choose :
For the Spline mode, there an additional parameter :
This uses BigWarp to manually deform the images with the mouse. It can be done from scratch (eg. you place the points yourself) or from a previous registration (either a previous BigWarp session or elastix in Spline mode).
"},{"location":"guide-register-abba.html#from-scratch","title":"From scratch","text":"Register > Spline > BigWarp registration
to launch the tool. Choose the atlas that allows you to best see the brain structures (usually the regions outlines channels, the last one), and the reference fluorescence channel.
It will open two viewers, called \"BigWarp moving image\" and \"BigWarp fixed image\". Briefly, they correspond to the two spaces you're working in, the \"Atlas space\" and the \"Slice space\".
Tip
Do not panick yet, while the explanations might be confusing (at least they were to me), in practice, it is easy, intuitive and can even be fun (sometimes, at small dose).
To browse the viewer, use Right Button + drag (Left Button is used to rotate the viewer), Middle Button zooms in and out.
The idea is to place points, called landmarks, that always go in pairs : one in the moving image and one where it corresponds to in the fixed image (or vice-versa). In practice, we will only work in the BigWarp fixed image viewer to place landmarks in both space in one click, then drag it to the corresponding location, with a live feedback of the transformation needed to go from one to another.
To do so :
Press Space to switch to the \"Landmark mode\".
Warning
In \"Landmark mode\", Right Button can't be used to browse the view anymore. To do so, turn off the \"Landmark mode\" hitting Space again.
Use Ctrl+Left Button to place a landmark.
Info
At least 4 landmarks are needed before activating the live-transform view.
When there are at least 4 landmarks, hit T to activate the \"Transformed\" view. Transformed
will be written at the bottom.
OK
.Important remarks and tips
Head to Register > Edit last Registration
to work on a previous registration.
If the previous registration was done with elastix (Spline) or BigWarp, it will launch the BigWarp interface exactly like above, but with landmarks already placed, either on a grid (elastix) or the one you manually placed (BigWarp).
Tip
It will ask which channels to use, you can modify the channel for your slices to work on two channels successively. For instance, one could make a first registration using the NISSL staining, then refine the motoneurons with the ChAT staining, if available.
"},{"location":"guide-register-abba.html#abba-state-file","title":"ABBA state file","text":"ABBA can save the state you're in, from the File > Save State
menu. It will be saved as a .abba
file, which is actually a zip archive containing a bunch of JSON, listing every actions you made and in which order, meaning you will stil be able to cancel actions after quitting ABBA.
To load a state, quit ABBA, launch it again, then choose File > Load State
and select the .abba
file to carry on with the registration.
Save, save, save !
Those state files are cheap, eg. they are lightweight (less than 200KB). You should save the state each time you finish a slice, and you can keep all your files, without overwritting the previous ones, appending a number to its file name. This will allow to roll back to the previous slice in the event of any problem you might face.
"},{"location":"guide-register-abba.html#export-registration-back-to-qupath","title":"Export registration back to QuPath","text":""},{"location":"guide-register-abba.html#export-the-registration-from-abba","title":"Export the registration from ABBA","text":"Once you are satisfied with your registration, select the registered slices and head to Export > QuPath > Export Registrations To QuPath Project
. Check the box to make sure to get the latest registered regions.
It will export several files in the QuPath projects, including the transformed atlas regions ready to be imported in QuPath and the transformations parameters to be able to convert coordinates from the extension.
"},{"location":"guide-register-abba.html#import-the-registration-in-qupath","title":"Import the registration in QuPath","text":"Make sure you installed the ABBA extension in QuPath.
From your project with an image open, the basic usage is to head to Extensions > ABBA > Load Atlas Annotations into Open Image
. Choose to Split Left and Right Regions
to make the two hemispheres independent, and choose the \"acronym\" to name the regions. The registered regions should be imported as Annotations in the image.
Tip
With ABBA in regular Fiji using the CCFv3 Allen mouse brain atlas, the left and right regions are flipped, because ABBA considers the slices as backward facing. The importAbba.groovy
script located in scripts/qupath-utils-atlas
allows you to flip left/right regions names. This is OK because the Allen brain is symmetrical by construction.
For more complex use, check the Groovy scripts in scripts/qupath-utils/atlas
. ABBA registration is used throughout the guides, to either work with brain regions (and count objects for instance) or to get the detections' coordinates in the atlas space.
While cuisto
does not have a reference paper as of now, you can reference the GitHub repository.
Please make sure to cite all the softwares used in your research. Citations are usually the only metric used by funding agencies, so citing properly the tools used in your research ensures the continuation of those projects.
There are three configuration files : altas_blacklist
, atlas_fusion
and a modality-specific file, that we'll call config
in this document. The former two are related to the atlas you're using, the latter is what is used by cuisto
to know what and how to compute and display things. There is a fourth, optional, file, used to provide some information on a specific experiment, info
.
The configuration files are in the TOML file format, that are basically text files formatted in a way that is easy to parse in Python. See here for a basic explanation of the syntax.
Most lines of each template file are commented to explain what each parameter do.
"},{"location":"main-configuration-files.html#atlas_blacklisttoml","title":"atlas_blacklist.toml","text":"Click to see an example file atlas_blacklist.toml# TOML file to list Allen brain regions to ignore during analysis.\n# \n# It is used to blacklist regions and all descendants regions (\"WITH_CHILD\").\n# Objects belonging to those regions and their descendants will be discarded.\n# And you can specify an exact region where to remove objects (\"EXACT\"),\n# descendants won't be affected.\n# Use it to remove noise in CBX, ventricual systems and fiber tracts.\n# Regions are referenced by their exact acronym.\n#\n# Syntax :\n# [WITH_CHILDS]\n# members = [\"CBX\", \"fiber tracts\", \"VS\"]\n#\n# [EXACT]\n# members = [\"CB\"]\n\n\n[WITH_CHILDS]\nmembers = [\"CBX\", \"fiber tracts\", \"VS\"]\n\n[EXACT]\nmembers = [\"CB\"]\n
This file is used to filter out specified regions and objects belonging to them.
members
keys will be ignored. Objects whose parents are in here will be ignored as well.[WITH_CHILDS]
section, regions and objects belonging to those regions and all descending regions (child regions, as per the altas hierarchy) will be removed.[EXACT]
section, only regions and objects belonging to those exact regions are removed. Descendants regions are not taken into account.# TOML file to determine which brain regions should be merged together.\n# Regions are referenced by their exact acronym.\n# The syntax should be the following :\n# \n# [MY]\n# name = \"Medulla\" # new or existing full name\n# acronym = \"MY\" # new or existing acronym\n# members = [\"MY-mot\", \"MY-sat\"] # existing Allen Brain acronyms that should belong to the new region\n#\n# Then, regions labelled \"MY-mot\" and \"MY-sat\" will be labelled \"MY\" and will join regions already labelled \"MY\".\n# What's in [] does not matter but must be unique and is used to group.\n# The new \"name\" and \"acronym\" can be existing Allen Brain regions or a new (meaningful) one.\n# Note that it is case sensitive.\n\n[PHY]\nname = \"Perihypoglossal nuclei\"\nacronym = \"PHY\"\nmembers = [\"NR\", \"PRP\"]\n\n[NTS]\nname = \"Nucleus of the solitary tract\"\nacronym = \"NTS\"\nmembers = [\"ts\", \"NTSce\", \"NTSco\", \"NTSge\", \"NTSl\", \"NTSm\"]\n\n[AMB]\nname = \"Nucleus ambiguus\"\nacronym = \"AMB\"\nmembers = [\"AMBd\", \"AMBv\"]\n\n[MY]\nname = \"Medulla undertermined\"\nacronym = \"MYu\"\nmembers = [\"MY-mot\", \"MY-sat\"]\n\n[IRN]\nname = \"Intermediate reticular nucleus\"\nacronym = \"IRN\"\nmembers = [\"IRN\", \"LIN\"]\n
This file is used to group regions together, to customize the atlas' hierarchy. It is particularly useful to group smalls brain regions that are impossible to register precisely. Keys name
, acronym
and members
should belong to a [section]
.
[section]
is just for organizing, the name does not matter but should be unique.name
should be a human-readable name for your new region.acronym
is how the region will be refered to. It can be a new acronym, or an existing one.members
is a list of acronyms of atlas regions that should be part of the new one.########################################################################################\n# Configuration file for cuisto package\n# -----------------------------------------\n# This is a TOML file. It maps a key to a value : `key = value`.\n# Each key must exist and be filled. The keys' names can't be modified, except:\n# - entries in the [channels.names] section and its corresponding [channels.colors] section,\n# - entries in the [regions.metrics] section. \n#\n# It is strongly advised to NOT modify this template but rather copy it and modify the copy.\n# Useful resources :\n# - the TOML specification : https://toml.io/en/\n# - matplotlib colors : https://matplotlib.org/stable/gallery/color/color_demo.html\n#\n# Configuration file part of the python cuisto package.\n# version : 2.1\n########################################################################################\n\nobject_type = \"Cells\" # name of QuPath base classification (eg. without the \": subclass\" part)\nsegmentation_tag = \"cells\" # type of segmentation, matches directory name, used only in the full pipeline\n\n[atlas] # information related to the atlas used\nname = \"allen_mouse_10um\" # brainglobe-atlasapi atlas name\ntype = \"brain\" # brain or cord (eg. registration done in ABBA or abba_python)\nmidline = 5700 # midline Z coordinates (left/right limit) in microns\noutline_structures = [\"root\", \"CB\", \"MY\", \"P\"] # structures to show an outline of in heatmaps\n\n[channels] # information related to imaging channels\n[channels.names] # must contain all classifications derived from \"object_type\"\n\"marker+\" = \"Positive\" # classification name = name to display\n\"marker-\" = \"Negative\"\n[channels.colors] # must have same keys as names' keys\n\"marker+\" = \"#96c896\" # classification name = matplotlib color (either #hex, color name or RGB list)\n\"marker-\" = \"#688ba6\"\n\n[hemispheres] # information related to hemispheres\n[hemispheres.names]\nLeft = \"Left\" # Left = name to display\nRight = \"Right\" # Right = name to display\n[hemispheres.colors] # must have same keys as names' keys\nLeft = \"#ff516e\" # Left = matplotlib color (either #hex, color name or RGB list)\nRight = \"#960010\" # Right = matplotlib color\n\n[distributions] # spatial distributions parameters\nstereo = true # use stereotaxic coordinates (Paxinos, only for brain)\nap_lim = [-8.0, 0.0] # bins limits for anterio-posterior\nap_nbins = 75 # number of bins for anterio-posterior\ndv_lim = [-1.0, 7.0] # bins limits for dorso-ventral\ndv_nbins = 50 # number of bins for dorso-ventral\nml_lim = [-5.0, 5.0] # bins limits for medio-lateral\nml_nbins = 50 # number of bins for medio-lateral\nhue = \"channel\" # color curves with this parameter, must be \"hemisphere\" or \"channel\"\nhue_filter = \"Left\" # use only a subset of data. If hue=hemisphere : channel name, list of such or \"all\". If hue=channel : hemisphere name or \"both\".\ncommon_norm = true # use a global normalization for each hue (eg. the sum of areas under all curves is 1)\n[distributions.display]\nshow_injection = false # add a patch showing the extent of injection sites. 
Uses corresponding channel colors\ncmap = \"OrRd\" # matplotlib color map for heatmaps\ncmap_nbins = 50 # number of bins for heatmaps\ncmap_lim = [1, 50] # color limits for heatmaps\n\n[regions] # distributions per regions parameters\nbase_measurement = \"Count\" # the name of the measurement in QuPath to derive others from\nhue = \"channel\" # color bars with this parameter, must be \"hemisphere\" or \"channel\"\nhue_filter = \"Left\" # use only a subset of data. If hue=hemisphere : channel name, list of such or \"all\". If hue=channel : hemisphere name or \"both\".\nhue_mirror = false # plot two hue_filter in mirror instead of discarding the other\nnormalize_starter_cells = false # normalize non-relative metrics by the number of starter cells\n[regions.metrics] # names of metrics. Do not change the keys !\n\"density \u00b5m^-2\" = \"density \u00b5m^-2\"\n\"density mm^-2\" = \"density mm^-2\"\n\"coverage index\" = \"coverage index\"\n\"relative measurement\" = \"relative count\"\n\"relative density\" = \"relative density\"\n[regions.display]\nnregions = 18 # number of regions to display (sorted by max.)\norientation = \"h\" # orientation of the bars (\"h\" or \"v\")\norder = \"max\" # order the regions by \"ontology\" or by \"max\". Set to \"max\" to provide a custom order\ndodge = true # enforce the bar not being stacked\nlog_scale = false # use log. scale for metrics\n[regions.display.metrics] # name of metrics to display\n\"count\" = \"count\" # real_name = display_name, with real_name the \"values\" in [regions.metrics]\n\"density mm^-2\" = \"density (mm^-2)\"\n\n[files] # full path to information TOML files\nblacklist = \"../../atlas/atlas_blacklist.toml\"\nfusion = \"../../atlas/atlas_fusion.toml\"\noutlines = \"/data/atlases/allen_mouse_10um_outlines.h5\"\ninfos = \"../../configs/infos_template.toml\"\n
This file is used to configure cuisto
behavior. It specifies what to compute, how, and display parameters such as colors associated to each classifications, hemisphere names, distributions bins limits...
Warning
When editing your config.toml file, you're allowed to modify the keys only in the [channels]
section.
object_type
: name of QuPath base classification (eg. without the \": subclass\" part) segmentation_tag
: type of segmentation, matches directory name, used only in the full pipeline
atlas Information related to the atlas used
name
: brainglobe-atlasapi atlas name type
: \"brain\" or \"cord\" (eg. registration done in ABBA or abba_python). This will determine whether to flip Left/Right when determining detections hemisphere based on their coordinates. Also adapts the axes in the 2D heatmaps. midline
: midline Z coordinates (left/right limit) in microns to determine detections hemisphere based on their coordinates. outline_structures
: structures to show an outline of in heatmaps
channels Information related to imaging channels
names Must contain all classifications derived from \"object_type\" you want to process. In the form subclassification name = name to display on the plots
\"marker+\"
: classification name = name to display \"marker-\"
: add any number of sub-classification
colors Must have same keys as \"names\" keys, in the form subclassification name = color
, with color specified as a matplotlib named color, an RGB list or an hex code.
\"marker+\"
: classification name = matplotlib color \"marker-\"
: must have the same entries as \"names\".
hemispheres Information related to hemispheres, same structure as channels
names
Left
: Left = name to display Right
: Right = name to display
colors Must have same keys as names' keys
Left
: ff516e\" # Left = matplotlib color (either #hex, color name or RGB list) Right
: 960010\" # Right = matplotlib color
distributions Spatial distributions parameters
stereo
: use stereotaxic coordinates (as in Paxinos, only for mouse brain CCFv3) ap_lim
: bins limits for anterio-posterior in mm ap_nbins
: number of bins for anterio-posterior dv_lim
: bins limits for dorso-ventral in mm dv_nbins
: number of bins for dorso-ventral ml_lim
: bins limits for medio-lateral in mm ml_nbins
: number of bins for medio-lateral hue
: color curves with this parameter, must be \"hemisphere\" or \"channel\" hue_filter
: use only a subset of data
common_norm
: use a global normalization (eg. the sum of areas under all curves is 1). Otherwise, normalize each hue individually
display Display parameters
show_injection
: add a patch showing the extent of injection sites. Uses corresponding channel colors. Requires the information TOML configuration file set up cmap
: matplotlib color map for 2D heatmaps cmap_nbins
: number of bins for 2D heatmaps cmap_lim
: color limits for 2D heatmaps
regions Distributions per regions parameters
base_measurement
: the name of the measurement in QuPath to derive others from. Usually \"Count\" or \"Length \u00b5m\" hue
: color bars with this parameter, must be \"hemisphere\" or \"channel\" hue_filter
: use only a subset of data
hue_mirror
: plot two hue_filter in mirror instead of discarding the others. For example, if hue=channel and hue_filter=\"both\", plots the two hemisphere in mirror. normalize_starter_cells
: normalize non-relative metrics by the number of starter cells
metrics Names of metrics. The keys are used internally in cuisto as is so should NOT be modified. The values will only chang etheir names in the ouput file
\"density \u00b5m^-2\"
: relevant name \"density mm^-2\"
: relevant name \"coverage index\"
: relevant name \"relative measurement\"
: relevant name \"relative density\"
: relevant name
display
nregions
: number of regions to display (sorted by max.)
orientation
: orientation of the bars ("h" or "v")
order
: order the regions by "ontology" or by "max". Set to "max" to provide a custom order.
dodge
: enforce the bars not being stacked
log_scale
: use a log scale for the metrics
metrics
Names of metrics to display
"count"
: real_name = display_name, with real_name being the "values" in [regions.metrics]
"density mm^-2"
: real_name = display_name, same as above
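Putting the regions part together, a hypothetical sketch could look like this — key nesting follows the layout above, the left-hand metric keys must stay as-is, and only the right-hand display names are free:

```toml
[regions]
base_measurement = "Count"  # or "Length µm" for fibers
hue = "channel"
hue_filter = "both"
hue_mirror = false
normalize_starter_cells = false

[regions.metrics]  # internal keys = display names
"density µm^-2" = "density µm^-2"
"density mm^-2" = "density mm^-2"
"coverage index" = "coverage index"
"relative measurement" = "relative count"
"relative density" = "relative density"

[regions.display]
nregions = 18
orientation = "h"
order = "max"
dodge = true
log_scale = false

[regions.display.metrics]  # real_name = display_name
"count" = "count"
"density mm^-2" = "density (mm^-2)"
```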
files
Full paths to the information TOML files and atlas outlines for the 2D heatmaps.
blacklist
fusion
outlines
infos
```toml
# TOML file to specify experimental settings of each animal.
# Syntax should be :
# [animalid0] # animal ID
# slice_thickness = 30 # slice thickness in microns
# slice_spacing = 60 # spacing between two slices in microns
# [animalid0.marker-name] # [{Animal id}.{segmented channel name}]
# starter_cells = 190 # number of starter cells
# injection_site = [x, y, z] # approx. injection site in CCFv3 coordinates
#
# --------------------------------------------------------------------------
[animalid0]
slice_thickness = 30
slice_spacing = 60
[animalid0."marker+"]
starter_cells = 150
injection_site = [ 10.8937328, 6.18522070, 6.841855301 ]
[animalid0."marker-"]
starter_cells = 175
injection_site = [ 10.7498512, 6.21545461, 6.815487203 ]
# --------------------------------------------------------------------------
[animalid1-SC]
slice_thickness = 30
slice_spacing = 120
[animalid1-SC.EGFP]
starter_cells = 250
injection_site = [ 10.9468211, 6.3479642, 6.0061113 ]
[animalid1-SC.DsRed]
starter_cells = 275
injection_site = [ 10.9154874, 6.2954872, 8.1587125 ]
# --------------------------------------------------------------------------
```
This file is used to specify the injection sites for each animal and each channel, to display them in the distributions.
"},{"location":"main-getting-help.html","title":"Getting help","text":"For help in QuPath, ABBA, Fiji or any image processing-related questions, your one stop is the image.sc forum. There, you can search with specific tags (#qupath
, #abba
, ...). You can also ask questions or even answer to some by creating an account !
For help with cuisto in particular, you can open an issue on GitHub (which requires an account as well), or send an email to me or Antoine Lesage.
In short, the installation boils down to:

```
conda create -c conda-forge -n cuisto-env python=3.12
conda activate cuisto-env
```

Then, from the cuisto-xxx folder:

```
pip install .
```

If you want to build the doc:

```
pip install .[doc]
```
Tip
If all goes well, you shouldn't need any admin rights to install the various pieces of software used before cuisto.
Important
Remember to cite all the software you use! See Citing.
"},{"location":"main-getting-started.html#qupath","title":"QuPath","text":"QuPath is an \"open source software for bioimage analysis\". You can install it from the official website : https://qupath.github.io/. The documentation is quite clear and comprehensive : https://qupath.readthedocs.io/en/stable/index.html.
This is where you'll create QuPath projects, in which you'll be able to browse your images, annotate them, import registered brain regions and find objects of interests (via automatic segmentation, thresholding, pixel classification, ...). Then, those annotations and detections can be exported to be processed by cuisto
.
ABBA

This is the tool you'll use to register 2D histological sections to 3D atlases. See the dedicated page.
"},{"location":"main-getting-started.html#python-virtual-environment-manager-conda","title":"Python virtual environment manager (conda
)","text":"The cuisto
package is written in Python. It depends on scientific libraries (such as NumPy, pandas and many more). Those libraries need to be installed in versions that are compatible with each other and with cuisto
. To make sure those versions do not conflict with other Python tools you might be using (deeplabcut
, abba_python
, ...), we will install cuisto
and its dependencies in a dedicated virtual environment.
conda is a tool that takes care of this. It comes with a "base" environment, from which we will create and manage other, project-specific environments. It is also used to download and install Python in each of those environments, as well as third-party libraries. conda in itself is free and open-source and can be used freely by anyone.
It is included with the Anaconda distribution, which is subject to specific terms of service: it is free to use if you're an individual, a member of a company with fewer than 200 employees, or a member of a university (but not a national research lab); otherwise, you need to pay for a licence. conda, while being free, is by default configured to use the "defaults" channel to fetch packages (including Python itself), a repository operated by Anaconda which is, itself, subject to the Anaconda terms of service.
In contrast, conda-forge is a community-run repository that contains more numerous and more up-to-date packages, and is free to use for anyone. The idea is to use conda directly (instead of the Anaconda graphical interface) and download packages from conda-forge (instead of the Anaconda-run defaults). To try to decipher this mess, Anaconda provides this figure:
Furthermore, the "base" conda environment installed with the Anaconda distribution is bloated, already contains tons of libraries, and tends to self-destruct at some point (eg. it becomes unable to resolve the inter-dependencies), leaving you unable to install new libraries or create new environments.
This is why it is highly recommended to install Miniconda instead, a minimal installer for conda, and configure it to use the free, community-run channel conda-forge, or, even better, to use Miniforge, which is basically the same but pre-configured to use conda-forge. The only downside is that you will not get the Anaconda graphical user interface and you'll need to use the terminal instead, but worry not! We've got you covered.
```
conda init
```

This will activate conda and its base environment whenever you open a new PowerShell window. Now, when opening a new PowerShell (or terminal), you should see a prompt like this:

```
(base) PS C:\Users\myname>
```
Tip
If Anaconda is already installed and you don't have the rights to uninstall it, you'll have to use it instead. Launch the "Anaconda Prompt (PowerShell)" and run conda init. Then open a regular PowerShell window and run conda config --add channels conda-forge, so that subsequent installations and environment creations will fetch the required dependencies from conda-forge.
This section explains how to actually install the cuisto package. The following commands should be run from a terminal (PowerShell). Remember that the -c conda-forge bits are not necessary if you installed conda with the Miniforge distribution.
1. Create a virtual environment with Python 3.12:

```
conda create -c conda-forge -n cuisto-env python=3.12
```

2. Download the cuisto Source code .zip package, from the Releases page.
3. Install cuisto in the cuisto-env environment we just created. First, you need to activate the cuisto-env environment:

```
conda activate cuisto-env
```
Now, the prompt should look like this:

```
(cuisto-env) PS C:\Users\myname>
```

This means that Python packages will now be installed in the cuisto-env environment and won't conflict with other toolboxes you might be using. Then, we use pip to install cuisto. pip was installed with Python, and will scan the cuisto folder, specifically the "pyproject.toml" file that lists all the required dependencies. To do so, you can either:
- specify the full path to the cuisto folder:

```
pip install /path/to/cuisto
```

- change directory to the cuisto folder first:

```
cd /path/to/cuisto
```

then install the package ("." denotes "here"):

```
pip install .
```

- or, in the file explorer, inside the cuisto folder, use Shift+Right Button to "Open PowerShell window here" and run:

```
pip install .
```
cuisto is now installed inside the cuisto-env environment and will be available in Python from that environment!
Tip
You will need to perform step 3 each time you want to update the package.
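In practice, updating after downloading a new release boils down to re-running the install from the new sources:

```
conda activate cuisto-env
pip install /path/to/cuisto
```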
If you already have registered data and cells in QuPath, you can export Annotations and Detections as TSV files and head to the Example section.
"},{"location":"main-using-notebooks.html","title":"Using notebooks","text":"A Jupyter notebook is a way to use Python in an interactive manner. It uses cells that contain Python code, and that are to be executed to immediately see the output, including figures.
You can see some rendered notebooks in the examples here, but you can also download them (downward arrow button on the top right corner of each notebook) and run them locally with your own data.
To do so, you can either use an integrated development environment (basically a supercharged text editor) that supports Jupyter notebooks, or directly the Jupyter web interface.
IDE
You can use for instance Visual Studio Code, also known as vscode.

Jupyter web interface
1. Open a terminal in your notebooks folder (cd Documents\notebooks or, in the file explorer in your "notebooks" folder, Shift+Right Button to "Open PowerShell window here").
2. Activate the environment:

```
conda activate cuisto-env
```

3. Launch the Jupyter web interface:

```
jupyter lab
```

This should open a web page where you can open the ipynb files.

With cuisto, it is possible to plot 2D heatmaps on brain contours.
All the detections are projected in a single plane, thus it is up to you to select a relevant data range. It is primarily intended to give a quick, qualitative overview of the spreading of your data.
To do so, it requires the brain regions outlines, stored in a hdf5 file. This can be generated with brainglobe-atlasapi. The generate_atlas_outlines.py script located in scripts/atlas will show you how to make such a file, which the cuisto.display module can use.
Alternatively, it is possible to directly plot density maps without cuisto, using brainglobe-heatmap. An example is shown here.
The representation of an image in a computer is basically a table where each element represents the pixel value (see more here). It can be n-dimensional, where the typical dimensions would be (x, y, z), time and the fluorescence channels.
In large images, such as histological slices that are more than 10000×10000 pixels, a strategy called tiling is used to optimize access to specific regions of the image. Storing the whole image as a single block in the file would imply loading the whole thing at once into memory (RAM), even though one would only need to access a given rectangular region at a given zoom. Instead, the image is stored as tiles, small squares (512--2048 pixels) that pave the whole image and are used to reconstruct the original image. Therefore, when zooming in, only the relevant tiles are loaded and displayed, allowing for smooth navigation of large images. This process is done seamlessly by software like QuPath and BigDataViewer (the Fiji plugin ABBA is based on) when loading tiled images. This is also leveraged for image processing in QuPath, which will work on tiles instead of the whole image so as not to saturate your computer's RAM.
Most images are already tiled, including Zeiss CZI images. Note that those tiles do not necessarily correspond to the actual, real-world tiles the microscope acquired to image the whole slide.
"},{"location":"tips-formats.html#pyramids","title":"Pyramids","text":"In the same spirit as tiles, it would be a waste to have to load the entire image (and all the tiles) at once when viewing the image at max zoom-out, as your monitor nor your eyes would handle it. Instead, smaller, rescaled versions of the original image are stored alongside it, and depending on the zoom you are using, the sub-resolution version is displayed. Again, this is done seamlessly by QuPath and ABBA, allowing you to quickly switch from an image to another, without having to load the GB-sized image. Also, for image processing that does not require the original pixel size, QuPath can also leverage pyramids to go faster.
Usually, upon opening a CZI file in ZEN, there is a pop-up suggesting that you generate pyramids. It is a very good idea to say yes, wait a bit and save the file, so that the pyramidal levels are saved within the file.
"},{"location":"tips-formats.html#metadata","title":"Metadata","text":"Metadata, while often overlooked, are of paramount importance in microscopy data. It allows both softwares and users to interpret the raw data of images, eg. the values of each pixels. Most image file formats support this, including the microcope manufacturer file formats. Metadata may include :
Pixel size is the parameter that is absolutely necessary. Channel names and colors are more a quality of life feature, to make sure not to mix your difference fluorescence channels. CZI files or exported OME-TIFF files include this out of the box so you don't really need to pay attention.
"},{"location":"tips-formats.html#bio-formats","title":"Bio-formats","text":"Bio-formats is an initiative of the Open Microscopy Environment (OME) consortium, aiming at being able to read proprietary microscopy image data and metadata. It is used in QuPath, Fiji and ABBA.
This page summarizes the level of support of numerous file formats. You can see that Zeiss CZI files and Leica LIF are quite well supported, and should therefore work out of the box in QuPath.
"},{"location":"tips-formats.html#zeiss-czi-files","title":"Zeiss CZI files","text":"QuPath and ABBA supports any Bio-formats supported, tiled, pyramidal images.
If you're in luck, adding the pyramidal CZI file to your QuPath project will just work. If it doesn't, you'll notice immediately : the tiles will be shuffled and you'll see only a part of the image instead of the whole one. Unfortunately I was not able to determine why this happens and did not find a way to even predict if a file will or will not work.
In the event you experience this bug, you'll need to export the CZI files to OME-TIFF files from ZEN, then generate tiled pyramidal images with the pyramid-creator package that you can find here.
Markdown (.md) files

Markdown is a markup language to create formatted text. It is basically a simple text file that could be opened with any text editor software (notepad and the like), but features specific tags to format the text with heading levels, typesetting (bold, italic), links, lists... This very page is actually written in markdown, and the engine that builds it renders the text in a nicely formatted manner.
If you open a .md file with vscode for example, you'll get a magnifying glass on the top right corner to switch to the rendered version of the file.
"},{"location":"tips-formats.html#toml-toml-files","title":"TOML (.toml) files","text":"TOML, or Tom's Obvious Minimal Language, is a configuration file format (similar to YAML). Again, it is basically a simple text file that can be opened with any text editor and is human-readable, but also computer-readable. This means that it is easy for most software and programming language to parse the file to associate a variable (or \"key\") to a value, thus making it a good file format for configuration. It is used in cuisto
(see The configuration files page).
The syntax looks like this :
```toml
# a comment, ignored by the computer
key1 = 10 # the key "key1" is mapped to the number 10
key2 = "something" # "key2" is mapped to the string "something"
key3 = ["something else", 1.10, -25] # "key3" is mapped to a list with 3 elements
[section] # we can declare sections
key1 = 5 # this is not "key1", it actually is section.key1
[section.example] # we can have nested sections
key1 = true # this is section.example.key1, mapped to the boolean True
```
You can check the full specification of this language here.
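As an illustration, reading such a file from Python requires no third-party package since Python 3.11, thanks to the standard tomllib module. A minimal sketch, assuming the example above was saved as "example.toml" (the file name is hypothetical):

```python
import tomllib  # standard library since Python 3.11

# tomllib requires the file to be opened in binary mode
with open("example.toml", "rb") as fid:
    config = tomllib.load(fid)

print(config["key1"])                        # 10
print(config["section"]["key1"])             # 5
print(config["section"]["example"]["key1"])  # True
```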
"},{"location":"tips-formats.html#csv-csv-tsv-files","title":"CSV (.csv, .tsv) files","text":"CSV (or TSV) stands for Comma-Separated Values (or Tab-Separated Values) and is, once again, a simple text file formatted in a way that allows LibreOffice Calc (or Excel) to open them as a table. Lines of the table are delimited with new lines, and columns are separated with commas (,
) or tabulations. Those files are easily parsed by programming languages (including Python). QuPath can export annotations and detections measurements in TSV format.
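For instance, a TSV file exported from QuPath can be loaded in Python with pandas in one line, as done in the example notebooks (the file name here is hypothetical):

```python
import pandas as pd

# sep="\t" tells pandas that the columns are tab-separated
df = pd.read_csv("detections_measurements.tsv", sep="\t")
print(df.head())
```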
JSON (.json) files

JSON is a "data-interchange format". It is used to store data, very much like TOML, but supports more complex data and is more efficient to read and write, while being less human-readable. It is used in cuisto to store fibers-like objects coordinates, as they can contain several millions of points (making CSV unusable).
GeoJSON (.geojson) files

GeoJSON is a file format used to store geographic data structures, basically objects coordinates with various shapes. It is based on and compatible with JSON, which makes it easy to parse in numerous programming languages. It is used in QuPath to import and export objects, which can be points, lines, polygons...
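As a sketch, reading such a file from Python only needs the standard json module. This assumes the export is a standard GeoJSON FeatureCollection (the file name is hypothetical, and your export's exact layout may differ):

```python
import json

with open("exported_objects.geojson") as fid:
    data = json.load(fid)

# a GeoJSON FeatureCollection stores each object as a "feature",
# whose geometry holds the shape type and its coordinates
for feature in data["features"]:
    geometry = feature["geometry"]
    print(geometry["type"], geometry["coordinates"])
```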
"},{"location":"tips-qupath.html","title":"QuPath","text":""},{"location":"tips-qupath.html#custom-scripts","title":"Custom scripts","text":"While QuPath graphical user interface (GUI) should meet a lot of your needs, it is very convenient to use scripting to automate certain tasks, execute them in batch (on all your images) and do things you couldn't do otherwise. QuPath uses the Groovy programming language, which is mostly Java.
Warning
Not all commands will appear in the history.
In QuPath, in the left panel in the "Workflow" tab, there is a history of most of the commands you used during the session. At the bottom, you can click on Create workflow to select the relevant commands and create a script. This will open the built-in script editor, which will contain the Groovy version of what you did graphically.
Tip
The scripts/qupath-utils folder contains a bunch of utility scripts.
They can be run in batch with the three-dotted menu on the bottom right corner of the script editor: Run for project, then choose the images you want the script to run on.
This notebook shows how to load data exported from QuPath, compute metrics and display them, according to the configuration file. This is meant for a single animal.
There are some conventions that need to be met in the QuPath project so that the measurements are usable with cuisto.
You should copy this notebook, the configuration file and the atlas-related configuration files (blacklist and fusion) elsewhere and edit them according to your needs.
The data was generated from QuPath with stardist cell detection on toy data.
```python
import pandas as pd

import cuisto
```
```python
# Full path to your configuration file, edited according to your need beforehand
config_file = "../../resources/demo_config_cells.toml"
```
```python
# - Files
# animal identifier
animal = "animalid0"
# set the full path to the annotations tsv file from QuPath
annotations_file = "../../resources/cells_measurements_annotations.tsv"
# set the full path to the detections tsv file from QuPath
detections_file = "../../resources/cells_measurements_detections.tsv"
```
```python
# get configuration
cfg = cuisto.config.Config(config_file)
```
```python
# read data
df_annotations = pd.read_csv(annotations_file, index_col="Object ID", sep="\t")
df_detections = pd.read_csv(detections_file, index_col="Object ID", sep="\t")

# remove annotations that are not brain regions
df_annotations = df_annotations[df_annotations["Classification"] != "Region*"]
df_annotations = df_annotations[df_annotations["ROI"] != "Rectangle"]

# convert atlas coordinates from mm to microns
df_detections[["Atlas_X", "Atlas_Y", "Atlas_Z"]] = df_detections[
    ["Atlas_X", "Atlas_Y", "Atlas_Z"]
].multiply(1000)

# have a look
display(df_annotations.head())
display(df_detections.head())
```

(table output omitted: first rows of df_annotations and df_detections)
```python
# get distributions per regions, spatial distributions and coordinates
df_regions, dfs_distributions, df_coordinates = cuisto.process.process_animal(
    animal, df_annotations, df_detections, cfg, compute_distributions=True
)

# have a look
display(df_regions.head())
display(df_coordinates.head())
```

(table output omitted: first rows of df_regions and df_coordinates)
```python
# plot distributions per regions
figs_regions = cuisto.display.plot_regions(df_regions, cfg)
# specify which regions to plot
# figs_regions = cuisto.display.plot_regions(df_regions, cfg, names_list=["GRN", "IRN", "MDRNv"])

# save as svg
# figs_regions[0].savefig(r"C:\Users\glegoc\Downloads\regions_count.svg")
# figs_regions[1].savefig(r"C:\Users\glegoc\Downloads\regions_density.svg")
```
```python
# plot 1D distributions
fig_distrib = cuisto.display.plot_1D_distributions(
    dfs_distributions, cfg, df_coordinates=df_coordinates
)
```
If there were several animals in the measurement file, it would be displayed as mean +/- sem instead.
```python
# plot heatmap (all types of cells pooled)
fig_heatmap = cuisto.display.plot_2D_distributions(df_coordinates, cfg)
```

Density map
Draw 2D heatmaps as density isolines.
This notebook does not actually use cuisto and relies only on brainglobe-heatmap to extract brain structures outlines.
Only the detections measurements with atlas coordinates exported from QuPath are used.
You need to select the range of data to be used; the region outlines will be extracted at the centroid of that range. Therefore, a range that is too large will be misleading and irrelevant.
```python
import brainglobe_heatmap as bgh
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
```
```python
# path to the exported measurements from QuPath
filename = "../../resources/cells_measurements_detections.tsv"
```
Settings
```python
# atlas to use
atlas_name = "allen_mouse_10um"
# brain regions whose outlines will be plotted
regions = ["root", "CB", "MY", "GRN", "IRN"]
# range to include, in Allen coordinates, in microns
ap_lims = [9800, 10000]  # lims : [0, 13200] for coronal
ml_lims = [5600, 5800]  # lims : [0, 11400] for sagittal
dv_lims = [3900, 4100]  # lims : [0, 8000] for top
# number of isolines
nlevels = 5
# color mapping between classification and matplotlib color
palette = {"Cells: marker-": "#d8782f", "Cells: marker+": "#8ccb73"}
```
```python
df = pd.read_csv(filename, sep="\t")
display(df.head())
```

(table output omitted: first rows of the detections table)
Here we can filter out classifications we don't want to display.
```python
# select objects
# df = df[df["Classification"] == "example: classification"]
```
```python
# get outline coordinates in coronal (=frontal) orientation
coords_coronal = bgh.get_structures_slice_coords(
    regions,
    orientation="frontal",
    atlas_name=atlas_name,
    position=(np.mean(ap_lims), 0, 0),
)
# get outline coordinates in sagittal orientation
coords_sagittal = bgh.get_structures_slice_coords(
    regions,
    orientation="sagittal",
    atlas_name=atlas_name,
    position=(0, 0, np.mean(ml_lims)),
)
# get outline coordinates in top (=horizontal) orientation
coords_top = bgh.get_structures_slice_coords(
    regions,
    orientation="horizontal",
    atlas_name=atlas_name,
    position=(0, np.mean(dv_lims), 0),
)
```
```python
# Coronal projection
# select objects within the rostro-caudal range
df_coronal = df[
    (df["Atlas_X"] >= ap_lims[0] / 1000) & (df["Atlas_X"] <= ap_lims[1] / 1000)
]

plt.figure()

for struct_name, contours in coords_coronal.items():
    for cont in contours:
        plt.fill(cont[:, 0] / 1000, cont[:, 1] / 1000, lw=1, fc="none", ec="k")

# see https://seaborn.pydata.org/generated/seaborn.kdeplot.html to customize
ax = sns.kdeplot(
    df_coronal,
    x="Atlas_Z",
    y="Atlas_Y",
    hue="Classification",
    levels=nlevels,
    common_norm=False,
    palette=palette,
)
ax.invert_yaxis()
sns.despine(left=True, bottom=True)
plt.axis("equal")
plt.xlabel(None)
plt.ylabel(None)
plt.xticks([])
plt.yticks([])
plt.plot([2, 3], [8, 8], "k", linewidth=3)
plt.text(2, 7.9, "1 mm")
```

Out[7]: Text(2, 7.9, '1 mm')
```python
# Sagittal projection
# select objects within the medio-lateral range
df_sagittal = df[
    (df["Atlas_Z"] >= ml_lims[0] / 1000) & (df["Atlas_Z"] <= ml_lims[1] / 1000)
]

plt.figure()

for struct_name, contours in coords_sagittal.items():
    for cont in contours:
        plt.fill(cont[:, 0] / 1000, cont[:, 1] / 1000, lw=1, fc="none", ec="k")

# see https://seaborn.pydata.org/generated/seaborn.kdeplot.html to customize
ax = sns.kdeplot(
    df_sagittal,
    x="Atlas_X",
    y="Atlas_Y",
    hue="Classification",
    levels=nlevels,
    common_norm=False,
    palette=palette,
)
ax.invert_yaxis()
sns.despine(left=True, bottom=True)
plt.axis("equal")
plt.xlabel(None)
plt.ylabel(None)
plt.xticks([])
plt.yticks([])
plt.plot([2, 3], [7.1, 7.1], "k", linewidth=3)
plt.text(2, 7, "1 mm")
```

Out[8]: Text(2, 7, '1 mm')
```python
# Top projection
# select objects within the dorso-ventral range
df_top = df[(df["Atlas_Y"] >= dv_lims[0] / 1000) & (df["Atlas_Y"] <= dv_lims[1] / 1000)]

plt.figure()

for struct_name, contours in coords_top.items():
    for cont in contours:
        plt.fill(-cont[:, 0] / 1000, cont[:, 1] / 1000, lw=1, fc="none", ec="k")

# see https://seaborn.pydata.org/generated/seaborn.kdeplot.html to customize
ax = sns.kdeplot(
    df_top,
    x="Atlas_Z",
    y="Atlas_X",
    hue="Classification",
    levels=nlevels,
    common_norm=False,
    palette=palette,
)
ax.invert_yaxis()
sns.despine(left=True, bottom=True)
plt.axis("equal")
plt.xlabel(None)
plt.ylabel(None)
plt.xticks([])
plt.yticks([])
plt.plot([0.5, 1.5], [0.5, 0.5], "k", linewidth=3)
plt.text(0.5, 0.4, "1 mm")
```

Out[9]: Text(0.5, 0.4, '1 mm')

Fibers coverage
Plot regions coverage percentage in the spinal cord.
This showcases that any brainglobe atlas should be supported.
Here we're going to quantify the percentage of area of each spinal cord region innervated by axons.
The \"area \u00b5m^2\" measurement for each annotations can be created in QuPath with a pixel classifier, using the Measure button.
We're going to consider that the "area µm^2" measurement generated by the pixel classifier is an object count. cuisto computes a density, which is the count in each region divided by its area. Therefore, in this case, it will actually be the fraction of area covered by fibers in a given color.
The data was generated using QuPath with a pixel classifier on toy data.
```python
import pandas as pd

import cuisto
```
```python
# Full path to your configuration file, edited according to your need beforehand
config_file = "../../resources/demo_config_fibers.toml"
```
```python
# - Files
# not important if only one animal
animal = "animalid1-SC"
# set the full path to the annotations tsv file from QuPath
annotations_file = "../../resources/fibers_measurements_annotations.tsv"
```
```python
# get configuration
cfg = cuisto.config.Config(config_file)
```
```python
# read data
df_annotations = pd.read_csv(annotations_file, index_col="Object ID", sep="\t")
df_detections = pd.DataFrame()  # empty DataFrame

# remove annotations that are not brain regions
df_annotations = df_annotations[df_annotations["Classification"] != "Region*"]
df_annotations = df_annotations[df_annotations["ROI"] != "Rectangle"]

# have a look
display(df_annotations.head())
```

(table output omitted: first rows of df_annotations)
```python
# get distributions per regions, spatial distributions and coordinates
df_regions, dfs_distributions, df_coordinates = cuisto.process.process_animal(
    animal, df_annotations, df_detections, cfg, compute_distributions=False
)

# convert the "density µm^-2" column, which is actually the coverage fraction, to a percentage
df_regions["density µm^-2"] = df_regions["density µm^-2"] * 100

# have a look
display(df_regions.head())
```

(table output omitted: first rows of df_regions)
```python
# plot distributions per regions
fig_regions = cuisto.display.plot_regions(df_regions, cfg)
# specify which regions to plot
# fig_regions = cuisto.display.plot_regions(df_regions, cfg, names_list=["Rh9", "Sr9", "8Sp"])

# save as svg
# fig_regions[0].savefig(r"C:\Users\glegoc\Downloads\nice_figure.svg")
```

Fibers length in multi animals
```python
import cuisto
```
```python
# Full path to your configuration file, edited according to your need beforehand
config_file = "../../resources/demo_config_multi.toml"
```
```python
# Files
wdir = "../../resources/multi"
animals = ["mouse0", "mouse1"]
```
```python
# get configuration
cfg = cuisto.Config(config_file)
```
```python
# get distributions per regions
df_regions, _, _ = cuisto.process.process_animals(
    wdir, animals, cfg, compute_distributions=False
)

# have a look
display(df_regions.head(10))
```

```
Processing mouse1: 100%|██████████| 2/2 [00:00<00:00, 15.66it/s]
```

(table output omitted: first ten rows of df_regions)
```python
figs_regions = cuisto.display.plot_regions(df_regions, cfg)
```
This example uses synthetic data to showcase how cuisto can be used in a pipeline.
Annotations measurements should be exported from QuPath, following the required directory structure.
Alternatively, you can merge all your CSV files yourself, one per animal, adding an animal ID to each table. Those can be processed with the cuisto.process.process_animal() function in a loop, collecting the results at each iteration and finally concatenating them, as sketched below. The results can then be used with the display module. See the API reference for the process module.
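A minimal sketch of that loop, reusing the process_animal() call from the notebooks above. The file names and the one-annotations-file-per-animal layout are hypothetical; adapt them to how you merged your exports:

```python
import pandas as pd

import cuisto

animals = ["mouse0", "mouse1"]
cfg = cuisto.Config("../../resources/demo_config_multi.toml")

results = []
for animal in animals:
    # hypothetical naming scheme: one QuPath annotations export per animal
    df_annotations = pd.read_csv(
        f"{animal}_annotations.tsv", index_col="Object ID", sep="\t"
    )
    # no detections here, hence the empty DataFrame
    df_regions, _, _ = cuisto.process.process_animal(
        animal, df_annotations, pd.DataFrame(), cfg, compute_distributions=False
    )
    results.append(df_regions)

# concatenate the per-animal tables into a single one and plot
df_regions_all = pd.concat(results, ignore_index=True)
figs_regions = cuisto.display.plot_regions(df_regions_all, cfg)
```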