From b32b8b31cd0028762e431a405a34f4f4a7a81688 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 13:16:25 +0000 Subject: [PATCH] Deployed 84f08b9 with MkDocs version: 1.6.1 --- .nojekyll | 0 404.html | 1175 +++ api-compute.html | 2536 +++++++ api-config-config.html | 1315 ++++ api-config.html | 1951 +++++ api-display.html | 4647 ++++++++++++ api-io.html | 2777 +++++++ api-process.html | 2252 ++++++ api-script-pyramids.html | 2659 +++++++ api-script-qupath-script-runner.html | 1647 ++++ api-script-segment.html | 3334 ++++++++ api-seg.html | 3666 +++++++++ api-utils.html | 4588 +++++++++++ assets/_mkdocstrings.css | 143 + .../twemoji@15.1.0/assets/svg/26a0.svg | 1 + .../fonts.googleapis.com/css.49ea35f2.css | 594 ++ .../v32/KFOjCnqEu92Fr1Mu51TjASc-CsTKlA.woff2 | Bin 0 -> 10656 bytes .../v32/KFOjCnqEu92Fr1Mu51TjASc0CsTKlA.woff2 | Bin 0 -> 13360 bytes .../v32/KFOjCnqEu92Fr1Mu51TjASc1CsTKlA.woff2 | Bin 0 -> 6144 bytes .../v32/KFOjCnqEu92Fr1Mu51TjASc2CsTKlA.woff2 | Bin 0 -> 1536 bytes .../v32/KFOjCnqEu92Fr1Mu51TjASc3CsTKlA.woff2 | Bin 0 -> 16756 bytes .../v32/KFOjCnqEu92Fr1Mu51TjASc5CsTKlA.woff2 | Bin 0 -> 7708 bytes .../v32/KFOjCnqEu92Fr1Mu51TjASc6CsQ.woff2 | Bin 0 -> 20216 bytes .../v32/KFOjCnqEu92Fr1Mu51TzBic-CsTKlA.woff2 | Bin 0 -> 10356 bytes .../v32/KFOjCnqEu92Fr1Mu51TzBic0CsTKlA.woff2 | Bin 0 -> 13104 bytes .../v32/KFOjCnqEu92Fr1Mu51TzBic1CsTKlA.woff2 | Bin 0 -> 6148 bytes .../v32/KFOjCnqEu92Fr1Mu51TzBic2CsTKlA.woff2 | Bin 0 -> 1468 bytes .../v32/KFOjCnqEu92Fr1Mu51TzBic3CsTKlA.woff2 | Bin 0 -> 16080 bytes .../v32/KFOjCnqEu92Fr1Mu51TzBic5CsTKlA.woff2 | Bin 0 -> 7464 bytes .../v32/KFOjCnqEu92Fr1Mu51TzBic6CsQ.woff2 | Bin 0 -> 19780 bytes .../v32/KFOkCnqEu92Fr1Mu51xEIzIFKw.woff2 | Bin 0 -> 1516 bytes .../v32/KFOkCnqEu92Fr1Mu51xFIzIFKw.woff2 | Bin 0 -> 16688 bytes .../v32/KFOkCnqEu92Fr1Mu51xGIzIFKw.woff2 | Bin 0 -> 13224 bytes .../v32/KFOkCnqEu92Fr1Mu51xHIzIFKw.woff2 | Bin 0 -> 6144 bytes .../roboto/v32/KFOkCnqEu92Fr1Mu51xIIzI.woff2 | Bin 0 -> 20144 bytes .../v32/KFOkCnqEu92Fr1Mu51xLIzIFKw.woff2 | Bin 0 -> 7724 bytes .../v32/KFOkCnqEu92Fr1Mu51xMIzIFKw.woff2 | Bin 0 -> 10492 bytes .../v32/KFOlCnqEu92Fr1MmSU5fABc4EsA.woff2 | Bin 0 -> 9684 bytes .../roboto/v32/KFOlCnqEu92Fr1MmSU5fBBc4.woff2 | Bin 0 -> 18492 bytes .../v32/KFOlCnqEu92Fr1MmSU5fBxc4EsA.woff2 | Bin 0 -> 7180 bytes .../v32/KFOlCnqEu92Fr1MmSU5fCBc4EsA.woff2 | Bin 0 -> 1500 bytes .../v32/KFOlCnqEu92Fr1MmSU5fCRc4EsA.woff2 | Bin 0 -> 15028 bytes .../v32/KFOlCnqEu92Fr1MmSU5fChc4EsA.woff2 | Bin 0 -> 12324 bytes .../v32/KFOlCnqEu92Fr1MmSU5fCxc4EsA.woff2 | Bin 0 -> 5688 bytes .../v32/KFOlCnqEu92Fr1MmWUlfABc4EsA.woff2 | Bin 0 -> 9780 bytes .../roboto/v32/KFOlCnqEu92Fr1MmWUlfBBc4.woff2 | Bin 0 -> 18596 bytes .../v32/KFOlCnqEu92Fr1MmWUlfBxc4EsA.woff2 | Bin 0 -> 6904 bytes .../v32/KFOlCnqEu92Fr1MmWUlfCBc4EsA.woff2 | Bin 0 -> 1456 bytes .../v32/KFOlCnqEu92Fr1MmWUlfCRc4EsA.woff2 | Bin 0 -> 14740 bytes .../v32/KFOlCnqEu92Fr1MmWUlfChc4EsA.woff2 | Bin 0 -> 12304 bytes .../v32/KFOlCnqEu92Fr1MmWUlfCxc4EsA.woff2 | Bin 0 -> 5708 bytes .../roboto/v32/KFOmCnqEu92Fr1Mu4WxKOzY.woff2 | Bin 0 -> 7096 bytes .../s/roboto/v32/KFOmCnqEu92Fr1Mu4mxK.woff2 | Bin 0 -> 18536 bytes .../roboto/v32/KFOmCnqEu92Fr1Mu5mxKOzY.woff2 | Bin 0 -> 9852 bytes .../roboto/v32/KFOmCnqEu92Fr1Mu72xKOzY.woff2 | Bin 0 -> 15336 bytes .../roboto/v32/KFOmCnqEu92Fr1Mu7GxKOzY.woff2 | Bin 0 -> 12456 bytes .../roboto/v32/KFOmCnqEu92Fr1Mu7WxKOzY.woff2 | Bin 0 -> 5796 bytes .../roboto/v32/KFOmCnqEu92Fr1Mu7mxKOzY.woff2 | 
Bin 0 -> 1496 bytes ...wgGEFl0_3vrtSM1J-gEPT5Ese6hmHSV0mf0h.woff2 | Bin 0 -> 24792 bytes ...wgGEFl0_3vrtSM1J-gEPT5Ese6hmHSZ0mf0h.woff2 | Bin 0 -> 16296 bytes ...wgGEFl0_3vrtSM1J-gEPT5Ese6hmHSd0mf0h.woff2 | Bin 0 -> 7528 bytes ...5mwgGEFl0_3vrtSM1J-gEPT5Ese6hmHSh0mQ.woff2 | Bin 0 -> 22736 bytes ...wgGEFl0_3vrtSM1J-gEPT5Ese6hmHSt0mf0h.woff2 | Bin 0 -> 10096 bytes ...wgGEFl0_3vrtSM1J-gEPT5Ese6hmHSx0mf0h.woff2 | Bin 0 -> 13036 bytes ...euFoqFrlnAIe2Imhk1T8rbociImtElOUlYIw.woff2 | Bin 0 -> 7972 bytes ...euFoqFrlnAIe2Imhk1T8rbociImtEleUlYIw.woff2 | Bin 0 -> 17428 bytes ...euFoqFrlnAIe2Imhk1T8rbociImtEluUlYIw.woff2 | Bin 0 -> 26644 bytes ...q2oeuFoqFrlnAIe2Imhk1T8rbociImtEm-Ul.woff2 | Bin 0 -> 24652 bytes ...euFoqFrlnAIe2Imhk1T8rbociImtEmOUlYIw.woff2 | Bin 0 -> 10704 bytes ...euFoqFrlnAIe2Imhk1T8rbociImtEn-UlYIw.woff2 | Bin 0 -> 14288 bytes .../external/unpkg.com/iframe-worker/shim.js | 1 + .../katex@0/dist/contrib/auto-render.min.js | 1 + .../unpkg.com/katex@0/dist/katex.min.css | 1 + .../unpkg.com/katex@0/dist/katex.min.js | 1 + .../unpkg.com/mermaid@11/dist/mermaid.min.js | 2314 ++++++ assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.83f73b43.min.js | 16 + assets/javascripts/bundle.83f73b43.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.6ce7567c.min.js | 42 + .../workers/search.6ce7567c.min.js.map | 7 + assets/stylesheets/main.6f8fc17f.min.css | 1 + assets/stylesheets/main.6f8fc17f.min.css.map | 1 + assets/stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + demo_notebooks/cells_distributions.html | 2692 +++++++ demo_notebooks/cells_distributions.ipynb | 934 +++ demo_notebooks/density_map.html | 2492 ++++++ demo_notebooks/density_map.ipynb | 523 ++ demo_notebooks/fibers_coverage.html | 2297 ++++++ demo_notebooks/fibers_coverage.ipynb | 511 ++ demo_notebooks/fibers_length_multi.html | 
2163 ++++++ demo_notebooks/fibers_length_multi.ipynb | 372 + guide-create-pyramids.html | 1483 ++++ guide-install-abba.html | 1650 ++++ guide-pipeline.html | 1512 ++++ guide-prepare-qupath.html | 1605 ++++ guide-qupath-objects.html | 1714 +++++ guide-register-abba.html | 1782 +++++ images/hq-pipeline.svg | 4 + index.html | 1393 ++++ javascripts/katex.js | 10 + main-citing.html | 1285 ++++ main-configuration-files.html | 1703 +++++ main-getting-help.html | 1290 ++++ main-getting-started.html | 1541 ++++ main-using-notebooks.html | 1322 ++++ overrides/main.html | 11 + search/search_index.js | 1 + search/search_index.json | 1 + sitemap.xml | 3 + sitemap.xml.gz | Bin 0 -> 127 bytes stylesheets/extra.css | 24 + tips-abba.html | 1290 ++++ tips-brain-contours.html | 1294 ++++ tips-formats.html | 1566 ++++ tips-qupath.html | 1358 ++++ 150 files changed, 78706 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 api-compute.html create mode 100644 api-config-config.html create mode 100644 api-config.html create mode 100644 api-display.html create mode 100644 api-io.html create mode 100644 api-process.html create mode 100644 api-script-pyramids.html create mode 100644 api-script-qupath-script-runner.html create mode 100644 api-script-segment.html create mode 100644 api-seg.html create mode 100644 api-utils.html create mode 100644 assets/_mkdocstrings.css create mode 100644 assets/external/cdn.jsdelivr.net/gh/jdecked/twemoji@15.1.0/assets/svg/26a0.svg create mode 100644 assets/external/fonts.googleapis.com/css.49ea35f2.css create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TjASc-CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TjASc0CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TjASc1CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TjASc2CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TjASc3CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TjASc5CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TjASc6CsQ.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TzBic-CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TzBic0CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TzBic1CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TzBic2CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TzBic3CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TzBic5CsTKlA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOjCnqEu92Fr1Mu51TzBic6CsQ.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOkCnqEu92Fr1Mu51xEIzIFKw.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOkCnqEu92Fr1Mu51xFIzIFKw.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOkCnqEu92Fr1Mu51xGIzIFKw.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOkCnqEu92Fr1Mu51xHIzIFKw.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOkCnqEu92Fr1Mu51xIIzI.woff2 create mode 100644 
assets/external/fonts.gstatic.com/s/roboto/v32/KFOkCnqEu92Fr1Mu51xLIzIFKw.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOkCnqEu92Fr1Mu51xMIzIFKw.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmSU5fABc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmSU5fBBc4.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmSU5fBxc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmSU5fCBc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmSU5fCRc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmSU5fChc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmSU5fCxc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmWUlfABc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmWUlfBBc4.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmWUlfBxc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmWUlfCBc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmWUlfCRc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmWUlfChc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOlCnqEu92Fr1MmWUlfCxc4EsA.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOmCnqEu92Fr1Mu4WxKOzY.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOmCnqEu92Fr1Mu4mxK.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOmCnqEu92Fr1Mu5mxKOzY.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOmCnqEu92Fr1Mu72xKOzY.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOmCnqEu92Fr1Mu7GxKOzY.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOmCnqEu92Fr1Mu7WxKOzY.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/roboto/v32/KFOmCnqEu92Fr1Mu7mxKOzY.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xTDF4xlVMF-BfR8bXMIhJHg45mwgGEFl0_3vrtSM1J-gEPT5Ese6hmHSV0mf0h.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xTDF4xlVMF-BfR8bXMIhJHg45mwgGEFl0_3vrtSM1J-gEPT5Ese6hmHSZ0mf0h.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xTDF4xlVMF-BfR8bXMIhJHg45mwgGEFl0_3vrtSM1J-gEPT5Ese6hmHSd0mf0h.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xTDF4xlVMF-BfR8bXMIhJHg45mwgGEFl0_3vrtSM1J-gEPT5Ese6hmHSh0mQ.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xTDF4xlVMF-BfR8bXMIhJHg45mwgGEFl0_3vrtSM1J-gEPT5Ese6hmHSt0mf0h.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xTDF4xlVMF-BfR8bXMIhJHg45mwgGEFl0_3vrtSM1J-gEPT5Ese6hmHSx0mf0h.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xdDF4xlVMF-BfR8bXMIjhOsXG-q2oeuFoqFrlnAIe2Imhk1T8rbociImtElOUlYIw.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xdDF4xlVMF-BfR8bXMIjhOsXG-q2oeuFoqFrlnAIe2Imhk1T8rbociImtEleUlYIw.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xdDF4xlVMF-BfR8bXMIjhOsXG-q2oeuFoqFrlnAIe2Imhk1T8rbociImtEluUlYIw.woff2 create mode 100644 
assets/external/fonts.gstatic.com/s/robotomono/v23/L0xdDF4xlVMF-BfR8bXMIjhOsXG-q2oeuFoqFrlnAIe2Imhk1T8rbociImtEm-Ul.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xdDF4xlVMF-BfR8bXMIjhOsXG-q2oeuFoqFrlnAIe2Imhk1T8rbociImtEmOUlYIw.woff2 create mode 100644 assets/external/fonts.gstatic.com/s/robotomono/v23/L0xdDF4xlVMF-BfR8bXMIjhOsXG-q2oeuFoqFrlnAIe2Imhk1T8rbociImtEn-UlYIw.woff2 create mode 100644 assets/external/unpkg.com/iframe-worker/shim.js create mode 100644 assets/external/unpkg.com/katex@0/dist/contrib/auto-render.min.js create mode 100644 assets/external/unpkg.com/katex@0/dist/katex.min.css create mode 100644 assets/external/unpkg.com/katex@0/dist/katex.min.js create mode 100644 assets/external/unpkg.com/mermaid@11/dist/mermaid.min.js create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.83f73b43.min.js create mode 100644 assets/javascripts/bundle.83f73b43.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js.map create mode 100644 assets/stylesheets/main.6f8fc17f.min.css create mode 100644 assets/stylesheets/main.6f8fc17f.min.css.map create mode 100644 assets/stylesheets/palette.06af60db.min.css create mode 100644 assets/stylesheets/palette.06af60db.min.css.map create mode 100644 demo_notebooks/cells_distributions.html create mode 100644 
demo_notebooks/cells_distributions.ipynb create mode 100644 demo_notebooks/density_map.html create mode 100644 demo_notebooks/density_map.ipynb create mode 100644 demo_notebooks/fibers_coverage.html create mode 100644 demo_notebooks/fibers_coverage.ipynb create mode 100644 demo_notebooks/fibers_length_multi.html create mode 100644 demo_notebooks/fibers_length_multi.ipynb create mode 100644 guide-create-pyramids.html create mode 100644 guide-install-abba.html create mode 100644 guide-pipeline.html create mode 100644 guide-prepare-qupath.html create mode 100644 guide-qupath-objects.html create mode 100644 guide-register-abba.html create mode 100644 images/hq-pipeline.svg create mode 100644 index.html create mode 100644 javascripts/katex.js create mode 100644 main-citing.html create mode 100644 main-configuration-files.html create mode 100644 main-getting-help.html create mode 100644 main-getting-started.html create mode 100644 main-using-notebooks.html create mode 100644 overrides/main.html create mode 100644 search/search_index.js create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz create mode 100644 stylesheets/extra.css create mode 100644 tips-abba.html create mode 100644 tips-brain-contours.html create mode 100644 tips-formats.html create mode 100644 tips-qupath.html diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..8425b0d --- /dev/null +++ b/404.html @@ -0,0 +1,1175 @@ + + + +
compute module, part of histoquant.

Contains actual computation functions.
get_distribution(df, col, hue, hue_filter, per_commonnorm, binlim, nbins=100)
+
Computes the distribution of objects.

A global distribution using only `col` is computed, then a second distribution
distinguishing the values of the `hue` column. For the latter, it is possible to use
only a subset of the data, selected through another column with `hue_filter`. That
other column is determined by `hue`: if the latter is "hemisphere", then `hue_filter`
is applied to the "channel" column, and vice versa.
`per_commonnorm` controls how the per-hue distributions are normalized, either as a
whole (True) or independently (False).
Use cases:

(1) Single channel, two hemispheres: `col=x`, `hue=hemisphere`, `hue_filter=""`,
`per_commonnorm=True`. Computes a distribution for each hemisphere; the sum of the
areas under both curves is equal to 1.

(2) Three channels, one hemisphere: `col=x`, `hue=channel`, `hue_filter="Ipsi."`,
`per_commonnorm=False`. Computes a distribution for each channel, using only points
located in the ipsilateral hemisphere. Each curve will have an area of 1.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame |  | required |
| `col` | str | Key in `df`. | required |
| `hue` | str | Key in `df`. | required |
| `hue_filter` | str | Further filtering for the "per" distribution. If hue = channel, the value is the name of one of the hemispheres; if hue = hemisphere, the value can be the name of a channel, a list of such names, or "all". | required |
| `per_commonnorm` | bool | Use common normalization for all hues (per argument). | required |
| `binlim` | list or tuple | First bin left edge and last bin right edge. | required |
| `nbins` | int | Number of bins. Default is 100. | `100` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df_distribution` | DataFrame | DataFrame with the computed distributions. |
histoquant/compute.py
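Below is a minimal call sketch. It assumes the module is importable as `histoquant.compute` and uses a made-up DataFrame whose column names (`x`, `hemisphere`, `channel`) mirror the use cases above; adapt them to your own data.

```python
import numpy as np
import pandas as pd

from histoquant import compute  # assumed import path for the compute module

# Toy objects table: one row per object, with a coordinate and two categorical columns.
rng = np.random.default_rng(0)
df = pd.DataFrame(
    {
        "x": rng.uniform(-4, 4, size=1000),              # e.g. antero-posterior coordinate (mm)
        "hemisphere": rng.choice(["Ipsi.", "Contra."], size=1000),
        "channel": rng.choice(["marker+", "marker-"], size=1000),
    }
)

# Use case (1): one distribution per hemisphere, normalized together so that
# the areas under both curves sum to 1.
df_distribution = compute.get_distribution(
    df,
    col="x",
    hue="hemisphere",
    hue_filter="",        # hue is "hemisphere": filter on channels ("" keeps everything)
    per_commonnorm=True,
    binlim=(-4, 4),
    nbins=50,
)
print(df_distribution.head())
```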
get_regions_metrics(df_annotations, object_type, channel_names, meas_base_name, metrics_names)
+
Get a new DataFrame with the cumulated length of axon segments in each brain region.

This is the quantification per brain region for fiber-like objects, e.g. axons. The
returned DataFrame has columns "cum. length µm", "cum. length mm", "density µm^-1",
"density mm^-1" and "coverage index".
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df_annotations` | DataFrame | DataFrame with an entry for each brain region, with columns "Area µm^2", "Name", "hemisphere" and "{object_type: channel} Length µm". | required |
| `object_type` | str | Object type (primary classification). | required |
| `channel_names` | dict | Map from original channel names to something else. | required |
| `meas_base_name` | str |  | required |
| `metrics_names` | dict |  | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df_regions` | DataFrame | DataFrame with brain region names, areas and metrics. |
histoquant/compute.py
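A hedged usage sketch follows. The toy `df_annotations` only mimics the expected columns; the exact keys expected in `metrics_names` and the value of `meas_base_name` are not fully documented here, so the values below are placeholders to adapt.

```python
import pandas as pd

from histoquant import compute  # assumed import path for the compute module

object_type = "Fibers"
channel_names = {"Cy5": "Positive", "EGFP": "Negative"}  # hypothetical mapping

# Toy annotations: one row per brain region and hemisphere, with the expected
# "Area µm^2", "Name", "hemisphere" and "{object_type}: {channel} Length µm" columns.
df_annotations = pd.DataFrame(
    {
        "Name": ["CP", "CP", "ACB", "ACB"],
        "hemisphere": ["Ipsi.", "Contra.", "Ipsi.", "Contra."],
        "Area µm^2": [1.2e6, 1.1e6, 8.0e5, 7.5e5],
        "Fibers: Cy5 Length µm": [1500.0, 300.0, 900.0, 120.0],
        "Fibers: EGFP Length µm": [700.0, 250.0, 400.0, 90.0],
    }
)

# Placeholder metric names -- keys are assumed to be kept as-is internally,
# values are the display names (see the configuration documentation).
metrics_names = {
    "density µm^-1": "density µm^-1",
    "density mm^-1": "density mm^-1",
    "coverage index": "coverage index",
}

df_regions = compute.get_regions_metrics(
    df_annotations,
    object_type=object_type,
    channel_names=channel_names,
    meas_base_name="Length µm",   # the QuPath base measurement, as in the config docs
    metrics_names=metrics_names,
)
print(df_regions.head())
```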
normalize_starter_cells(df, cols, animal, info_file, channel_names)
+
Normalize data by the number of starter cells.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | Contains the data to be normalized. | required |
| `cols` | list-like | Columns to divide by the number of starter cells. | required |
| `animal` | str | Animal ID to parse the number of starter cells. | required |
| `info_file` | str | Full path to the TOML file with information. | required |
| `channel_names` | dict | Map from original channel names to something else. | required |

Returns:

| Type | Description |
| --- | --- |
| DataFrame | Same `df` with normalized data. |
histoquant/compute.py
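A short call sketch, assuming the compute module import path used above and an information TOML file that actually exists; the column names, animal ID and paths are placeholders.

```python
import pandas as pd

from histoquant import compute  # assumed import path for the compute module

# Toy per-region measurements to be divided by the number of starter cells.
df = pd.DataFrame({"Positive Count": [120, 40], "Negative Count": [60, 10]})

df_norm = compute.normalize_starter_cells(
    df,
    cols=["Positive Count", "Negative Count"],   # placeholder column names
    animal="animal0",                            # placeholder animal ID
    info_file="/path/to/infos.toml",             # TOML file holding the starter cells counts
    channel_names={"Cy5": "Positive", "EGFP": "Negative"},
)
```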
object_type
: name of QuPath base classification (eg. without the ": subclass" part)
+segmentation_tag
: type of segmentation, matches directory name, used only in the full pipeline
Information related to the atlas used
+name
: brainglobe-atlasapi atlas name
+type
: "brain" or "cord" (eg. registration done in ABBA or abba_python). This will determine whether to flip Left/Right when determining detections hemisphere based on their coordinates. Also adapts the axes in the 2D heatmaps.
+midline
: midline Z coordinates (left/right limit) in microns to determine detections hemisphere based on their coordinates.
+outline_structures
: structures to show an outline of in heatmaps
Information related to imaging channels
+Must contain all classifications derived from "object_type" you want to process. In the form subclassification name = name to display on the plots
"marker+"
: classification name = name to display
+"marker-"
: add any number of sub-classification
Must have same keys as "names" keys, in the form subclassification name = color
, with color specified as a matplotlib named color, an RGB list or an hex code.
"marker+"
: classification name = matplotlib color
+"marker-"
: must have the same entries as "names".
Information related to hemispheres, same structure as channels
+Left
: Left = name to display
+Right
: Right = name to display
Must have same keys as names' keys
+Left
: ff516e" # Left = matplotlib color (either #hex, color name or RGB list)
+Right
: 960010" # Right = matplotlib color
Spatial distributions parameters
+stereo
: use stereotaxic coordinates (as in Paxinos, only for mouse brain CCFv3)
+ap_lim
: bin limits for antero-posterior, in mm
+ap_nbins
: number of bins for antero-posterior
+dv_lim
: bin limits for dorso-ventral, in mm
+dv_nbins
: number of bins for dorso-ventral
+ml_lim
: bin limits for medio-lateral, in mm
+ml_nbins
: number of bins for medio-lateral
+hue
: color curves with this parameter, must be "hemisphere" or "channel"
+hue_filter
: use only a subset of data
common_norm
: use a global normalization (eg. the sum of areas under all curves is 1). Otherwise, normalize each hue individually
Display parameters
+show_injection
: add a patch showing the extent of injection sites. Uses corresponding channel colors. Requires the information TOML configuration file set up
+cmap
: matplotlib color map for 2D heatmaps
+cmap_nbins
: number of bins for 2D heatmaps
+cmap_lim
: color limits for 2D heatmaps
Distributions per regions parameters
+base_measurement
: the name of the measurement in QuPath to derive others from. Usually "Count" or "Length µm"
+hue
: color bars with this parameter, must be "hemisphere" or "channel"
+hue_filter
: use only a subset of data
hue_mirror
: plot two hue_filter in mirror instead of discarding the others. For example, if hue=channel and hue_filter="both", plots the two hemispheres in mirror.
+normalize_starter_cells
: normalize non-relative metrics by the number of starter cells
Names of metrics. The keys are used internally in histoquant as-is, so they should NOT be modified. The values will only change their names in the output file.
+"density µm^-2"
: relevant name
+"density mm^-2"
: relevant name
+"coverage index"
: relevant name
+"relative measurement"
: relevant name
+"relative density"
: relevant name
nregions
: number of regions to display (sorted by max.)
+orientation
: orientation of the bars ("h" or "v")
+order
: order the regions by "ontology" or by "max". Set to "max" to provide a custom order
+dodge
: enforce the bar not being stacked
+log_scale
: use log. scale for metrics
name of metrics to display
+"count"
: real_name = display_name, with real_name the "values" in [regions.metrics]
+"density mm^-2"
Full path to information TOML files and atlas outlines for 2D heatmaps.
+blacklist
+fusion
+outlines
+infos
config module, part of histoquant.
+Contains the Config class.
Config(config_file)
+
The configuration class.

Reads the input configuration file and provides its constants.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `config_file` | str | Full path to the configuration file to load. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `cfg` | Config object |  |
Constructor.
+ + + + + + +histoquant/config.py
get_blacklist()
+
+#
get_hue_palette(mode)
+
+#Get color palette given hue.
+Maps hue to colors in channels or hemispheres.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `mode` | {"hemisphere", "channel"} |  | `"hemisphere"` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `palette` | dict | Maps a hue level to a color, usable in seaborn. |
histoquant/config.py
get_injection_sites(animals)
+
Get the list of injection site coordinates for each animal and each channel.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `animals` | list of str | List of animals. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `injection_sites` | dict | `{"x": {channel0: [x]}, "y": {channel1: [y]}}` |
histoquant/config.py
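A minimal sketch of loading a configuration and querying it. It assumes `Config` is importable from `histoquant.config` (as the source path above suggests) and that `get_hue_palette()` and `get_injection_sites()` are methods of the returned object; the paths and animal IDs are placeholders.

```python
from histoquant.config import Config  # module path as documented above

cfg = Config("/path/to/config.toml")  # a filled-in configuration file (see the configuration docs)

# Map hue levels to colors, usable directly with seaborn.
palette = cfg.get_hue_palette("channel")

# Injection site coordinates for a set of animals, per channel.
injection_sites = cfg.get_injection_sites(["animal0", "animal1"])
print(palette, injection_sites)
```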
display module, part of histoquant.
+Contains display functions, essentially wrapping matplotlib and seaborn functions.
add_data_coverage(df, ax, colors=None, **kwargs)
+
+#Add lines below the plot to represent data coverage.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame | DataFrame with … | required |
| `ax` | Axes | Handle to the axes where to add the patch. | required |
| `colors` | list or str or None | Colors for the patches, as an RGB list or hex list. Should have the same size as the number of patches to plot, e.g. the number of columns in `df`. | `None` |
| `**kwargs` |  | Passed to `patches.Rectangle()`. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes | Handle to updated axes. |
histoquant/display.py
add_injection_patch(X, ax, **kwargs)
+
+#Add a patch representing the injection sites.
+The patch will span from the minimal coordinate to the maximal. +If plotted in stereotaxic coordinates, coordinates should be converted beforehand.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `X` | list | Coordinates in mm for each animal. Can be empty to not plot anything. | required |
| `ax` | Axes | Handle to the axes where to add the patch. | required |
| `**kwargs` |  | Passed to `Axes.axvspan()`. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes | Handle to updated Axes. |
histoquant/display.py
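A small matplotlib sketch, assuming the display module is importable as `histoquant.display`; the curve and the injection coordinates are made up.

```python
import matplotlib.pyplot as plt

from histoquant import display  # assumed import path for the display module

fig, ax = plt.subplots()
ax.plot([-3, -2, -1, 0], [0.1, 0.4, 0.3, 0.05])  # some 1D distribution, made up here

# Injection site coordinates in mm (placeholders); the patch spans from the
# minimum to the maximum of these values. Extra kwargs go to Axes.axvspan().
display.add_injection_patch([-1.6, -1.2, -1.4, -1.1], ax, color="C1", alpha=0.25)

plt.show()
```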
draw_structure_outline(view='sagittal', structures=['root'], outline_file='', ax=None, microns=False, **kwargs)
+
Plot brain region outlines in a given projection.

This requires a file containing the structure outlines.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `view` | str | Projection, "sagittal", "coronal" or "top". Default is "sagittal". | `'sagittal'` |
| `structures` | list[str] | List of structure acronyms whose outlines will be drawn. Default is ["root"]. | `['root']` |
| `outline_file` | str | Full path to the outlines HDF5 file. | `''` |
| `ax` | Axes or None | Axes where to plot the outlines. If None, get current axes (the default). | `None` |
| `microns` | bool | If False (default), converts the coordinates to mm. | `False` |
| `**kwargs` |  | Passed to `pyplot.plot()`. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes |  |
histoquant/display.py
nice_bar_plot(df, x='', y=[''], hue='', ylabel=[''], orient='h', nx=None, ordering=None, names_list=None, hue_mirror=False, log_scale=False, bar_kws={}, pts_kws={})
+
Nice bar plot of the per-region distribution of objects.

This is used for the distribution of objects across brain regions. It shows the `y`
metric (count, areal density, cumulated length...) in each `x` category (brain regions).
`orient` controls whether the bars are shown horizontally (default) or vertically.
The input `df` must have an additional "hemisphere" column. All `y` are plotted in the
same figure as different subplots. `nx` controls the number of displayed regions.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame |  | required |
| `x` | str | Key in `df`. | `''` |
| `y` | str | Key in `df`. | `''` |
| `hue` | str | Key in `df`. | `''` |
| `ylabel` | list of str | Y axis labels. | `['']` |
| `orient` | "h" or "v" | "h" for horizontal bars (default) or "v" for vertical bars. | `'h'` |
| `nx` | None or int | Number of displayed regions. | `None` |
| `ordering` | None or list[str] or "max" | Sorted list of acronyms. Data will be sorted following this order; if "max", sorted by descending values; if None, not sorted (default). | `None` |
| `names_list` | list or None | List of names to display. If None (default), takes the most prominent overall ones. | `None` |
| `hue_mirror` | bool | If there are 2 groups, plot in mirror. Default is False. | `False` |
| `log_scale` | bool | Set the metrics in log scale. Default is False. | `False` |
| `bar_kws` | dict | Passed to `seaborn.barplot()`. | `{}` |
| `pts_kws` | dict | Passed to `seaborn.stripplot()`. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `figs` | list | List of figures. |
histoquant/display.py
nice_distribution_plot(df, x='', y='', hue=None, xlabel='', ylabel='', injections_sites={}, channel_colors={}, channel_names={}, ax=None, **kwargs)
+
+#Nice plot of 1D distribution of objects.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame |  | required |
| `x` | str | Key in `df`. | `''` |
| `y` | str | Key in `df`. | `''` |
| `hue` | str or None | Key in `df`. | `None` |
| `xlabel` | str | X axis label. | `''` |
| `ylabel` | str | Y axis label. | `''` |
| `injections_sites` | dict | Injection site 1D coordinates in a dict with the channel name as key. If empty, injection sites are not plotted (default). | `{}` |
| `channel_colors` | dict | Required if `injections_sites` is not empty; dict mapping channel names to a color. | `{}` |
| `channel_names` | dict | Required if `injections_sites` is not empty; dict mapping channel names to a display name. | `{}` |
| `ax` | Axes or None | Axes in which to plot the figure; if None, a new figure is created (default). | `None` |
| `**kwargs` |  | Passed to `seaborn.lineplot()`. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | matplotlib Axes | Handle to axes. |
histoquant/display.py
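A hedged sketch with a toy DataFrame; the column names ("bins", "distribution", "hemisphere") are assumptions standing in for the output of `histoquant.compute.get_distribution()`.

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from histoquant import display  # assumed import path for the display module

# Toy 1D distributions for two hemispheres; column names are placeholders.
bins = np.linspace(-4, 4, 50)
df = pd.DataFrame(
    {
        "bins": np.tile(bins, 2),
        "distribution": np.concatenate([np.exp(-(bins**2)), 0.5 * np.exp(-((bins - 1) ** 2))]),
        "hemisphere": ["Ipsi."] * 50 + ["Contra."] * 50,
    }
)

fig, ax = plt.subplots()
ax = display.nice_distribution_plot(
    df,
    x="bins",
    y="distribution",
    hue="hemisphere",
    xlabel="Antero-posterior (mm)",
    ylabel="Density",
    ax=ax,
)
plt.show()
```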
nice_heatmap(df, animals, x='', y='', xlabel='', ylabel='', invertx=False, inverty=False, **kwargs)
+
+#Nice plots of 2D distribution of boutons as a heatmap per animal.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame |  | required |
| `animals` | list-like of str | List of animals. | required |
| `x` | str | Key in `df`. | `''` |
| `y` | str | Key in `df`. | `''` |
| `xlabel` | str | X axis label. | `''` |
| `ylabel` | str | Y axis label. | `''` |
| `invertx` | bool | Whether to invert the x axis. Default is False. | `False` |
| `inverty` | bool | Whether to invert the y axis. Default is False. | `False` |
| `**kwargs` |  | Passed to `seaborn.histplot()`. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes or list of Axes | Handle to axes. |
histoquant/display.py
nice_joint_plot(df, x='', y='', xlabel='', ylabel='', invertx=False, inverty=False, outline_kws={}, ax=None, **kwargs)
+
+#Joint distribution.
+Used to display a 2D heatmap of objects. This is more qualitative than quantitative, +for display purposes.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `df` | DataFrame |  | required |
| `x` | str | Key in `df`. | `''` |
| `y` | str | Key in `df`. | `''` |
| `xlabel` | str | X axis label. | `''` |
| `ylabel` | str | Y axis label. | `''` |
| `invertx` | bool | Whether to invert the x axis. Default is False. | `False` |
| `inverty` | bool | Whether to invert the y axis. Default is False. | `False` |
| `outline_kws` | dict | Passed to `draw_structure_outline()`. | `{}` |
| `ax` | Axes or None | Axes to plot in. If None, draws in the current axes (default). | `None` |
| `**kwargs` |  | Passed to `seaborn.histplot()`. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `ax` | Axes |  |
histoquant/display.py
plot_1D_distributions(dfs_distributions, cfg, df_coordinates=None)
+
+#Wraps nice_distribution_plot().
+ +histoquant/display.py
plot_2D_distributions(df, cfg)
+
+#Wraps nice_joint_plot().
+ +histoquant/display.py
plot_regions(df, cfg, **kwargs)
+
+#Wraps nice_bar_plot().
+ +histoquant/display.py
io module, part of histoquant.
+Contains loading and saving functions.
cat_csv_dir(directory, **kwargs)
+
Scans a directory for CSV files and concatenates them into a single DataFrame.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `directory` | str | Path to the directory to scan. | required |
| `**kwargs` |  | Passed to `pandas.read_csv()`. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df` | DataFrame | All CSV files concatenated in a single DataFrame. |
histoquant/io.py
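A call sketch, assuming the io module is importable as `histoquant.io`; the directory and the `read_csv` options are placeholders to adapt to your own exports.

```python
from histoquant import io  # assumed import path for the io module

# Concatenate every CSV file found in a directory; keyword arguments are
# forwarded to pandas.read_csv() (the options below are placeholders).
df = io.cat_csv_dir(
    "/path/to/measurements/directory",
    sep="\t",
    index_col="Object ID",
)
print(len(df))
```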
cat_data_dir(directory, segtype, **kwargs)
+
Wraps either cat_csv_dir() or cat_json_dir(), depending on `segtype`.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `directory` | str | Path to the directory to scan. | required |
| `segtype` | str | "synaptophysin" or "fibers". | required |
| `**kwargs` |  | Passed to `cat_csv_dir()` or `cat_json_dir()`. | `{}` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df` | DataFrame | All files concatenated in a single DataFrame. |
histoquant/io.py
cat_json_dir(directory, hemisphere_names, atlas)
+
Scans a directory for JSON files and concatenates them into a single DataFrame.

The JSON files must be generated with 'workflow_import_export.groovy' from a QuPath
project.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `directory` | str | Path to the directory to scan. | required |
| `hemisphere_names` | dict | Maps hemisphere names in the JSON files ("Right" and "Left") to something else (e.g. "Ipsi." and "Contra."). | required |
| `atlas` | BrainGlobeAtlas | Atlas to read regions from. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df` | DataFrame | All JSON files concatenated in a single DataFrame. |
histoquant/io.py
check_empty_file(filename, threshold=1)
+
+#Checks if a file is empty.
Empty is defined as a file whose number of lines is lower than or equal to
`threshold` (to allow for headers).
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `filename` | str | Full path to the file to check. | required |
| `threshold` | int | If the number of lines is lower than or equal to this value, it is considered as empty. Default is 1. | `1` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `empty` | bool | True if the file is empty as defined above. |
histoquant/io.py
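A tiny self-contained check, assuming the io module is importable as `histoquant.io`.

```python
from histoquant import io  # assumed import path for the io module

# A file that only contains a header line counts as empty with the default threshold of 1.
with open("measurements.csv", "w") as fid:
    fid.write("Object ID\tName\tClassification\n")

print(io.check_empty_file("measurements.csv", threshold=1))  # True
```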
get_measurements_directory(wdir, animal, kind, segtype)
+
Get the directory with detection or annotation measurements for a given animal ID.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `wdir` | str | Base working directory. | required |
| `animal` | str | Animal ID. | required |
| `kind` | str | "annotation" or "detection". | required |
| `segtype` | str | Type of segmentation, e.g. "synaptophysin". | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `directory` | str | Path to the detections or annotations directory. |
histoquant/io.py
load_dfs(filepath, fmt, identifiers=['df_regions', 'df_coordinates', 'df_distribution_ap', 'df_distribution_dv', 'df_distribution_ml'])
+
+#Load DataFrames from file.
If `fmt` is "h5" ("xlsx"), identifiers are interpreted as HDF5 group identifiers
(sheet names, respectively).
If `fmt` is "pickle", "csv" or "tsv", identifiers are appended to `filename`.
The path to the file can't have a dot (".") in it.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `filepath` | str | Full path to the file(s), without extension. | required |
| `fmt` | {"h5", "csv", "pickle", "xlsx"} | File(s) format. | `"h5"` |
| `identifiers` | list of str | List of identifiers to load from files. Defaults to the ones saved by histoquant.process.process_animals(). | `['df_regions', 'df_coordinates', 'df_distribution_ap', 'df_distribution_dv', 'df_distribution_ml']` |

Returns: all requested DataFrames.
histoquant/io.py
save_dfs(out_dir, filename, dfs)
+
+#Save DataFrames to file.
+File format is inferred from file name extension.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `out_dir` | str | Output directory. | required |
| `filename` | str | File name. | required |
| `dfs` | dict | DataFrames to save, as {identifier: df}. If HDF5 or xlsx, all DataFrames are saved in the same file; otherwise the identifier is appended to the file name. | required |
histoquant/io.py
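A save/load round-trip sketch, assuming the io module import path used above and that `load_dfs()` returns the DataFrames in the order of `identifiers`; the output directory is a placeholder and the HDF5 format requires pytables.

```python
import pandas as pd

from histoquant import io  # assumed import path for the io module

dfs = {
    "df_regions": pd.DataFrame({"Name": ["CP", "ACB"], "count": [12, 5]}),
    "df_coordinates": pd.DataFrame({"x": [1.0, 2.0], "y": [0.5, 0.8]}),
}

# Save both DataFrames in a single HDF5 file (format inferred from the extension).
io.save_dfs("/path/to/output", "results.h5", dfs)

# Load them back: the path is given without extension and must not contain a dot.
df_regions, df_coordinates = io.load_dfs(
    "/path/to/output/results",
    "h5",
    identifiers=["df_regions", "df_coordinates"],
)
```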
process module, part of histoquant.
+Wraps other functions for a click&play behaviour. Relies on the configuration file.
process_animal(animal, df_annotations, df_detections, cfg, compute_distributions=True)
+
+#Quantify objects for one animal.
+Fetch required files and compute objects' distributions in brain regions, spatial +distributions and gather Atlas coordinates.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `animal` | str | Animal ID. | required |
| `df_annotations` | DataFrame | DataFrame of QuPath Annotations. | required |
| `df_detections` | DataFrame | DataFrame of QuPath Detections. | required |
| `cfg` | Config | The configuration loaded from the TOML configuration file. | required |
| `compute_distributions` | bool | If False, do not compute the 1D distributions and return an empty list instead. Default is True. | `True` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df_regions` | DataFrame | Metrics in brain regions. One entry for each hemisphere of each brain region. |
| `df_distribution` | list of pandas.DataFrame | Rostro-caudal distribution, as raw count and probability density function, in each axis. |
| `df_coordinates` | DataFrame | Atlas coordinates of each point. |
histoquant/process.py
process_animals(wdir, animals, cfg, out_fmt=None, compute_distributions=True)
+
+#Get data from all animals and plot.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `wdir` | str | Base working directory, containing … | required |
| `animals` | list-like of str | List of animal IDs. | required |
| `cfg` |  | Configuration object. | required |
| `out_fmt` | {None, "h5", "csv", "tsv", "xslx", "pickle"} | Output file(s) format; if None, nothing is saved (default). | `None` |
| `compute_distributions` | bool | If False, do not compute the 1D distributions and return an empty list instead. Default is True. | `True` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `df_regions` | DataFrame | Metrics in brain regions. One entry for each hemisphere of each brain region. |
| `df_distribution` | list of pandas.DataFrame | Rostro-caudal distribution, as raw count and probability density function, in each axis. |
| `df_coordinates` | DataFrame | Atlas coordinates of each point. |
histoquant/process.py
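An end-to-end sketch of the high-level workflow, assuming the import paths below and that `plot_2D_distributions()` takes the coordinates DataFrame; the working directory, config path and animal IDs are placeholders.

```python
from histoquant.config import Config       # as documented in the config module
from histoquant import display, process    # assumed import paths

wdir = "/path/to/working/directory"   # placeholder
animals = ["animal0", "animal1"]      # placeholder animal IDs

cfg = Config("/path/to/config.toml")

# Per-region metrics, 1D distributions and atlas coordinates for all animals,
# saved to HDF5 along the way.
df_regions, dfs_distributions, df_coordinates = process.process_animals(
    wdir, animals, cfg, out_fmt="h5", compute_distributions=True
)

# High-level plotting wrappers from the display module.
figs_regions = display.plot_regions(df_regions, cfg)
figs_1d = display.plot_1D_distributions(dfs_distributions, cfg, df_coordinates=df_coordinates)
fig_2d = display.plot_2D_distributions(df_coordinates, cfg)  # assuming coordinates are the expected input
```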
create_pyramids command line interface (CLI).

You can set up your settings by filling in the variables at the top of the file and run the script:
+++python create_pyramids.py /path/to/your/images
+
Or alternatively, you can run the script as a CLI :
+++python create_pyramids.py [options] /path/to/your/images
+
Example :
+++python create_pyramids.py --tile-size 1024 --pyramid-factor 4 /path/to/your/images
+
To get help (eg. list all options), run :
+++python create_pyramids.py --help
+
To use the QuPath backend, you'll need the companion 'createPyramids.groovy' script.
+author : Guillaume Le Goc (g.legoc@posteo.org) @ NeuroPSI +version : 2024.11.19
COMPRESSION_PYTHON: str = 'LZW'
+
+
+ module-attribute
+
+
+#Compression method.
+
INEXT: str = 'ome.tiff'
+
+
+ module-attribute
+
+
+#Input files extension.
+
NTHREADS: int = int(multiprocessing.cpu_count() / 2)
+
+
+ module-attribute
+
+
+#Number of threads for parallelization.
+
PYRAMID_FACTOR: int = 2
+
+
+ module-attribute
+
+
+#Factor between two consecutive pyramid levels.
+
PYRAMID_MAX: int = 32
+
+
+ module-attribute
+
+
+#Maximum rescaling (smaller pyramid).
+
QUPATH_PATH: str = 'C:/Users/glegoc/AppData/Local/QuPath-0.5.1/QuPath-0.5.1 (console).exe'
+
+
+ module-attribute
+
+
+#Full path to the QuPath (console) executable.
+
SCRIPT_PATH: str = os.path.join(os.path.dirname(__file__), 'createPyramids.groovy')
+
+
+ module-attribute
+
+
+#Full path to the groovy script that does the job.
+
TILE_SIZE: int = 512
+
+
+ module-attribute
+
+
+#Tile size (usually 512 or 1024).
+
USE_QUPATH: bool = True
+
+
+ module-attribute
+
+
+#Use QuPath and the external groovy script instead of pure python (more reliable).
+
get_tiff_options(compression, nthreads, tilesize)
+
+#Get the relevant tags and options to write a TIFF file.
+The returned dict is meant to be used to write a new tiff page with those tags.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `compression` | str | TIFF compression (None, LZW, ...). | required |
| `nthreads` | int | Number of threads to write tiles. | required |
| `tilesize` | int | Tile size in pixels. Should be a power of 2. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `options` | dict | Dictionary with TIFF tags. |
scripts/pyramids/create_pyramids.py
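A sketch of how the returned options could be used, assuming the script is importable as `create_pyramids` and that the returned dict maps to `tifffile.TiffWriter.write()` keyword arguments.

```python
import numpy as np
import tifffile

from create_pyramids import get_tiff_options  # assumes the script is on your path

options = get_tiff_options(compression="LZW", nthreads=4, tilesize=512)

# Write a toy tiled, compressed page with those options.
image = np.random.randint(0, 255, size=(2048, 2048), dtype="uint8")
with tifffile.TiffWriter("example.ome.tiff") as writer:
    writer.write(image, **options)
```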
pyramidalize_directory(inputdir, version=None, use_qupath=USE_QUPATH, tile_size=TILE_SIZE, pyramid_factor=PYRAMID_FACTOR, nthreads=NTHREADS, qupath_path=QUPATH_PATH, script_path=SCRIPT_PATH, pyramid_max=PYRAMID_MAX)
+
Create pyramidal versions of .ome.tiff images found in the input directory.

You need to edit the script to set QUPATH_PATH to your installation of QuPath. On Windows it is usually: C:/Users/$USERNAME$/AppData/Local/QuPath-0.X.Y/QuPath-0.X.Y (console).exe. Alternatively, you can run the script with the --qupath-path option.
+ +scripts/pyramids/create_pyramids.py
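A minimal call for the pure-Python backend, assuming the script is importable as `create_pyramids`; the input directory is a placeholder and must contain .ome.tiff files.

```python
from create_pyramids import pyramidalize_directory  # assumes the script is on your path

pyramidalize_directory(
    "/path/to/your/images",
    use_qupath=False,     # pure-Python backend, so QUPATH_PATH is not needed
    tile_size=512,
    pyramid_factor=2,
    nthreads=4,
    pyramid_max=32,
)
```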
pyramidalize_python(image_path, output_image, levels, tiffoptions)
+
+#Pyramidalization with tifffile and scikit-image.
Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `image_path` | str | Full path to the image. | required |
| `output_image` | str | Full path to the pyramidalized image. | required |
| `levels` | list-like of int | Pyramid levels. | required |
| `tiffoptions` | dict | Options for TiffWriter. | required |
scripts/pyramids/create_pyramids.py
pyramidalize_qupath(image_path, output_image, qupath_path, script_path, tile_size, pyramid_factor, nthreads)
+
+#Pyramidalization with QuPath backend.
+ +scripts/pyramids/create_pyramids.py
Template showing how to run a Groovy script with QuPath, multi-threaded.
EXCLUDE_LIST = []  (module-attribute)

Image names to NOT run the script on.

NTHREADS = 5  (module-attribute)

Number of threads to use.

QPROJ_PATH = '/path/to/qupath/project.qproj'  (module-attribute)

Full path to the QuPath project.

QUIET = True  (module-attribute)

Use QuPath in quiet mode, i.e. with minimal verbosity.

QUPATH_EXE = '/path/to/the/qupath/QuPath-0.5.1 (console).exe'  (module-attribute)

Path to the QuPath executable (console mode).

SAVE = True  (module-attribute)

Whether to save the project after the script ran on an image.

SCRIPT_PATH = '/path/to/the/script.groovy'  (module-attribute)

Path to the groovy script.
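As the template itself is only described here, below is a hedged sketch of how these module attributes could drive a multi-threaded run of the Groovy script through the QuPath console executable. The command-line flags (`script`, `--project`, `--image`) follow QuPath's CLI but should be checked against `QuPath script --help` for your version; the per-image invocation and the image list are assumptions, and QUIET/SAVE are presumed to be handled inside the Groovy script, so they are not shown.

```python
# Hedged sketch: run the Groovy script on each project image in parallel.
import subprocess
from concurrent.futures import ThreadPoolExecutor

QUPATH_EXE = "/path/to/the/qupath/QuPath-0.5.1 (console).exe"
QPROJ_PATH = "/path/to/qupath/project.qproj"
SCRIPT_PATH = "/path/to/the/script.groovy"
EXCLUDE_LIST = []
NTHREADS = 5

image_names = ["image1.ome.tiff", "image2.ome.tiff"]  # hypothetical project entries


def run_on_image(image_name: str) -> None:
    # Command assembled from the module attributes; verify the flags for your QuPath.
    cmd = [QUPATH_EXE, "script", "--project", QPROJ_PATH,
           "--image", image_name, SCRIPT_PATH]
    subprocess.run(cmd, check=True)


todo = [name for name in image_names if name not in EXCLUDE_LIST]
with ThreadPoolExecutor(max_workers=NTHREADS) as pool:
    list(pool.map(run_on_image, todo))
```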
Script to segment objects from images.

For fiber-like objects, binarize and skeletonize the image, then use skan to extract
branch coordinates.
For polygon-like objects, binarize the image, detect objects and extract contour
coordinates.
For points, treat them as polygons, then extract the centroids instead of the contours.
Finally, export the coordinates as collections in geojson files, importable in QuPath.
Supports any number of channels of interest within the same image. One output file per
channel will be created.

This script uses histoquant.seg. It is designed to work on probability maps generated
from a pixel classifier in QuPath, but might work on raw images.

Usage: fill in the Parameters section of the script and run it.
A "geojson" folder will be created in the parent directory of IMAGES_DIR.
To exclude objects near the edges of an ROI, specify the path to masks stored as images
with the same names as the probability images (without their suffix).

author : Guillaume Le Goc (g.legoc@posteo.org) @ NeuroPSI
version : 2024.12.10
CHANNELS_PARAMS = [{'name': 'cy5', 'target_channel': 0, 'proba_threshold': 0.85, 'qp_class': 'Fibers: Cy5', 'qp_color': [164, 250, 120]}, {'name': 'dsred', 'target_channel': 1, 'proba_threshold': 0.65, 'qp_class': 'Fibers: DsRed', 'qp_color': [224, 153, 18]}, {'name': 'egfp', 'target_channel': 2, 'proba_threshold': 0.85, 'qp_class': 'Fibers: EGFP', 'qp_color': [135, 11, 191]}]  (module-attribute)

This should be a list of dictionaries (one per channel) with keys: name, target_channel, proba_threshold, qp_class, qp_color.

EDGE_DIST = 0  (module-attribute)

Distance to the brain edge to ignore, in µm. 0 to disable.

FILTERS = {'length_low': 1.5, 'area_low': 10, 'area_high': 1000, 'ecc_low': 0.0, 'ecc_high': 0.9, 'dist_thresh': 30}  (module-attribute)

Dictionary with keys: length_low, area_low, area_high, ecc_low, ecc_high, dist_thresh.

IMAGES_DIR = '/path/to/images'  (module-attribute)

Full path to the images to segment.

IMG_SUFFIX = '_Probabilities.tiff'  (module-attribute)

Image suffix, including extension. Masks must have the same name without the suffix.

MASKS_DIR = 'path/to/corresponding/masks'  (module-attribute)

Full path to the masks, used to exclude objects near the brain edges (set to None or an
empty string to disable this feature).

MASKS_EXT = 'tiff'  (module-attribute)

Mask files extension.

MAX_PIX_VALUE = 255  (module-attribute)

Maximum possible pixel value, used to adjust proba_threshold.

ORIGINAL_PIXELSIZE = 0.45  (module-attribute)

Original images pixel size in microns. This is in case the pixel classifier uses a
lower resolution, yielding smaller probability maps, so the output object coordinates
need to be rescaled to the full-size images. The pixel size is written in the "Image"
tab in QuPath.

QUPATH_TYPE = 'detection'  (module-attribute)

QuPath object type.

SEGTYPE = 'boutons'  (module-attribute)

Type of segmentation.
get_geojson_dir(images_dir)

Get the directory of geojson files, which will be in the parent directory of
images_dir. If the directory does not exist, create it.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| images_dir | str |  | required |

Returns:

| Name | Type | Description |
|---|---|---|
| geojson_dir | str |  |

Source: scripts/segmentation/segment_images.py
get_geojson_properties(name, color, objtype='detection')

Return geojson object properties as a dictionary, ready to be used in geojson.Feature.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| name | str | Classification name. | required |
| color | tuple or list | Classification color in RGB (3-element vector). | required |
| objtype | str | Object type ("detection" or "annotation"). Default is "detection". | 'detection' |

Returns:

| Name | Type | Description |
|---|---|---|
| props | dict |  |

Source: scripts/segmentation/segment_images.py
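For context, a properties dict like the one returned above is typically attached to a geometry when building a Feature with the geojson package. The sketch below uses hypothetical property keys ("objectType", "classification"), since the actual keys are not shown on this page.

```python
# Sketch: attach properties to a point geometry (property keys are hypothetical).
import geojson

props = {
    "objectType": "detection",                  # hypothetical key
    "classification": {"name": "Fibers: EGFP",  # hypothetical key
                       "color": [135, 11, 191]},
}
feature = geojson.Feature(geometry=geojson.Point((120.0, 340.5)), properties=props)
print(geojson.dumps(feature))
```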
get_seg_method(segtype)

Determine what kind of segmentation is performed. Segmentation kinds are, for now,
lines, polygons or points. This is detected based on hardcoded keywords.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| segtype | str |  | required |

Returns:

| Name | Type | Description |
|---|---|---|
| seg_method | str |  |

Source: scripts/segmentation/segment_images.py
parameters_as_dict(images_dir, masks_dir, segtype, name, proba_threshold, edge_dist)

Get information as a dictionary.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| images_dir | str | Path to the images to be segmented. | required |
| masks_dir | str | Path to the image masks. | required |
| segtype | str | Segmentation type (e.g. "fibers"). | required |
| name | str | Name of the segmentation (e.g. "green"). | required |
| proba_threshold | float < 1 | Probability threshold. | required |
| edge_dist | float | Distance in µm to the brain edge that is ignored. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| params | dict |  |

Source: scripts/segmentation/segment_images.py
process_directory(images_dir, img_suffix='', segtype='', original_pixelsize=1.0, target_channel=0, proba_threshold=0.0, qupath_class='Object', qupath_color=[0, 0, 0], channel_suffix='', edge_dist=0.0, filters={}, masks_dir='', masks_ext='')

Main function, processes the .ome.tiff files in the input directory.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| images_dir | str | Path to the images to process. | required |
| img_suffix | str | Image suffix, including extension. | '' |
| segtype | str | Segmentation type. | '' |
| original_pixelsize | float | Original images pixel size in microns. | 1.0 |
| target_channel | int | Index of the channel containing the objects of interest (e.g. not the background), in the probability map (not the original images channels). | 0 |
| proba_threshold | float < 1 | Probabilities below this value will be discarded (multiplied by MAX_PIX_VALUE). | 0.0 |
| qupath_class | str | Name of the QuPath classification. | 'Object' |
| qupath_color | list of three elements | Color associated to that classification, in RGB. | [0, 0, 0] |
| channel_suffix | str | Channel name, will be used as a suffix in the output geojson files. | '' |
| edge_dist | float | Distance to the edge of the brain masks that will be ignored, in microns. Set to 0 to disable this feature. | 0.0 |
| filters | dict | Filter values to include or exclude objects. See the top of the script. | {} |
| masks_dir | str | Path to image masks, to exclude objects found near the edges. The masks must have the same name as the corresponding image to be segmented, without its suffix. Default is "", which disables this feature. | '' |
| masks_ext | str | Mask files extension, without leading ".". Default is "". | '' |

Source: scripts/segmentation/segment_images.py
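The script presumably calls this function once per entry of CHANNELS_PARAMS; the loop below is a hedged sketch of such a call (assuming the module-level parameters documented above are filled in), not the script's verbatim main block.

```python
# Hedged sketch of a main loop over the configured channels; CHANNELS_PARAMS,
# IMAGES_DIR, etc. are the module-level parameters documented above.
for param in CHANNELS_PARAMS:
    process_directory(
        IMAGES_DIR,
        img_suffix=IMG_SUFFIX,
        segtype=SEGTYPE,
        original_pixelsize=ORIGINAL_PIXELSIZE,
        target_channel=param["target_channel"],
        proba_threshold=param["proba_threshold"],
        qupath_class=param["qp_class"],
        qupath_color=param["qp_color"],
        channel_suffix=param["name"],
        edge_dist=EDGE_DIST,
        filters=FILTERS,
        masks_dir=MASKS_DIR,
        masks_ext=MASKS_EXT,
    )
```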
write_parameters(outfile, parameters, filters, original_pixelsize)

Write parameters to outfile.

A timestamp will be added. Parameters are written as key = value, and a [filters]
section is added before the filters parameters.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| outfile | str | Full path to the output file. | required |
| parameters | dict | General parameters. | required |
| filters | dict | Filters parameters. | required |
| original_pixelsize | float | Size of pixels in the original image. | required |

Source: scripts/segmentation/segment_images.py
seg module, part of histoquant.

Functions for segmenting probability maps stored as images.
convert_to_pixels(filters, pixelsize)

Convert some values in filters to pixels.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| filters | dict | Must contain the keys used below. | required |
| pixelsize | float | Pixel size in microns. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| filters | dict | Same as input, with values in pixels. |

Source: histoquant/seg.py
erode_mask(mask, edge_dist)

Erode the mask outline so that it is edge_dist smaller from the border. This allows
discarding the edges.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| mask | ndarray |  | required |
| edge_dist | float | Distance to edges, in pixels. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| eroded_mask | ndarray of bool |  |

Source: histoquant/seg.py
get_collection_from_points(coords, properties, rescale_factor=1.0, offset=0.5)

Gather coordinates from coords and put them in GeoJSON format.

An entry in coords is a pair of (x, y) coordinates defining the point. properties is a
dictionary with the QuPath properties of each detection.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| coords | list |  | required |
| properties | dict |  | required |
| rescale_factor | float | Rescale output coordinates by this factor. | 1.0 |

Returns:

| Name | Type | Description |
|---|---|---|
| collection | FeatureCollection |  |

Source: histoquant/seg.py
get_collection_from_poly(contours, properties, rescale_factor=1.0, offset=0.5)

Gather coordinates in the list and put them in GeoJSON format as Polygons.

An entry in contours must define a closed polygon. properties is a dictionary with the
QuPath properties of each detection.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| contours | list |  | required |
| properties | dict | QuPath objects' properties. | required |
| rescale_factor | float | Rescale output coordinates by this factor. | 1.0 |
| offset | float | Shift coordinates by this amount, typically to get pixel centers or edges. Default is 0.5. | 0.5 |

Returns:

| Name | Type | Description |
|---|---|---|
| collection | FeatureCollection | A FeatureCollection ready to be written as geojson. |

Source: histoquant/seg.py
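As a hedged sketch of the GeoJSON packing described above, contours could be turned into a FeatureCollection as below. The order of the offset and rescaling operations is an assumption, not taken from the source.

```python
# Sketch: wrap polygon contours into a GeoJSON FeatureCollection.
import geojson


def collection_from_poly_sketch(contours, properties, rescale_factor=1.0, offset=0.5):
    features = []
    for contour in contours:
        # contour: iterable of (x, y) pairs defining a closed polygon
        ring = [((x + offset) * rescale_factor, (y + offset) * rescale_factor)
                for x, y in contour]
        features.append(
            geojson.Feature(geometry=geojson.Polygon([ring]), properties=properties)
        )
    return geojson.FeatureCollection(features)
```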
get_collection_from_skel(skeleton, properties, rescale_factor=1.0, offset=0.5)

Get the coordinates of each skeleton path as GeoJSON Features in a FeatureCollection.
properties is a dictionary with the QuPath properties of each detection.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| skeleton | Skeleton |  | required |
| properties | dict | QuPath objects' properties. | required |
| rescale_factor | float | Rescale output coordinates by this factor. | 1.0 |
| offset | float | Shift coordinates by this amount, typically to get pixel centers or edges. Default is 0.5. | 0.5 |

Returns:

| Name | Type | Description |
|---|---|---|
| collection | FeatureCollection | A FeatureCollection ready to be written as geojson. |

Source: histoquant/seg.py
get_image_skeleton(img, minsize=0)

Get the image skeleton.

Computes the image skeleton and removes objects smaller than minsize.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| img | ndarray of bool |  | required |
| minsize | number | Minimum size an object can have, as a number of pixels. Default is 0. | 0 |

Returns:

| Name | Type | Description |
|---|---|---|
| skel | ndarray of bool | Binary image with a 1-pixel wide skeleton. |

Source: histoquant/seg.py
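A minimal sketch of what "skeletonize and drop small objects" can look like with scikit-image; whether the size filter is applied before or after skeletonization in the actual implementation is an assumption.

```python
# Sketch: binary skeleton with small objects removed (scikit-image).
import numpy as np
from skimage.morphology import remove_small_objects, skeletonize


def image_skeleton_sketch(img: np.ndarray, minsize: int = 0) -> np.ndarray:
    cleaned = remove_small_objects(img.astype(bool), min_size=minsize)
    return skeletonize(cleaned)
```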
get_pixelsize(image_name)

Get the pixel size recorded in the TIFF metadata of image_name.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| image_name | str | Full path to the image. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| pixelsize | float | Pixel size in microns. |

Source: histoquant/seg.py
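For reference, TIFF resolution tags can be read with tifffile as sketched below; the actual function may instead rely on OME-XML metadata, so treat the tag-based unit handling here as an assumption.

```python
# Sketch: derive pixel size (µm) from the XResolution/ResolutionUnit TIFF tags.
import tifffile


def pixelsize_sketch(image_name: str) -> float:
    with tifffile.TiffFile(image_name) as tif:
        page = tif.pages[0]
        num, den = page.tags["XResolution"].value  # pixels per resolution unit
        unit = page.tags["ResolutionUnit"].value   # 2 = inch, 3 = centimeter
    unit_um = 25400.0 if unit == 2 else 10000.0    # resolution unit in microns
    return den / num * unit_um                     # microns per pixel
```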
pad_image(img, finalsize)

Pad image with zeroes to match the expected final size.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| img | ndarray |  | required |
| finalsize | tuple or list | (nrows, ncolumns) | required |

Returns:

| Name | Type | Description |
|---|---|---|
| imgpad | ndarray | img with black borders. |

Source: histoquant/seg.py
segment_lines(img, geojson_props, minsize=0.0, rescale_factor=1.0)

Wraps skeleton analysis to get path coordinates.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| img | ndarray of bool | Binary image to segment as lines. | required |
| geojson_props | dict | GeoJSON properties of objects. | required |
| minsize | float | Minimum size in pixels for an object. | 0.0 |
| rescale_factor | float | Rescale output coordinates by this factor. | 1.0 |

Returns:

| Name | Type | Description |
|---|---|---|
| collection | FeatureCollection | A FeatureCollection ready to be written as geojson. |

Source: histoquant/seg.py
segment_points(img, geojson_props, area_min=0.0, area_max=np.inf, ecc_min=0, ecc_max=1, dist_thresh=0, rescale_factor=1)

Point segmentation.

First, segment polygons to apply shape filters, then extract their centroids, and
remove isolated points as defined by dist_thresh.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| img | ndarray of bool | Binary image to segment as points. | required |
| geojson_props | dict | GeoJSON properties of objects. | required |
| area_min | float | Minimum and maximum area in pixels for an object. | 0.0 |
| area_max | float | Minimum and maximum area in pixels for an object. | np.inf |
| ecc_min | float | Minimum and maximum eccentricity for an object. | 0 |
| ecc_max | float | Minimum and maximum eccentricity for an object. | 1 |
| dist_thresh | float | Maximal distance in pixels between objects before considering them isolated and removing them. 0 disables this filter. | 0 |
| rescale_factor | float | Rescale output coordinates by this factor. | 1 |

Returns:

| Name | Type | Description |
|---|---|---|
| collection | FeatureCollection | A FeatureCollection ready to be written as geojson. |

Source: histoquant/seg.py
segment_polygons(img, geojson_props, area_min=0.0, area_max=np.inf, ecc_min=0.0, ecc_max=1.0, rescale_factor=1.0)

Polygon segmentation.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| img | ndarray of bool | Binary image to segment as polygons. | required |
| geojson_props | dict | GeoJSON properties of objects. | required |
| area_min | float | Minimum and maximum area in pixels for an object. | 0.0 |
| area_max | float | Minimum and maximum area in pixels for an object. | np.inf |
| ecc_min | float | Minimum and maximum eccentricity for an object. | 0.0 |
| ecc_max | float | Minimum and maximum eccentricity for an object. | 1.0 |
| rescale_factor | float | Rescale output coordinates by this factor. | 1.0 |

Returns:

| Name | Type | Description |
|---|---|---|
| collection | FeatureCollection | A FeatureCollection ready to be written as geojson. |

Source: histoquant/seg.py
utils module, part of histoquant.

Contains utility functions.
add_brain_region(df, atlas, col='Parent')

Add brain region to a DataFrame with Atlas_X, Atlas_Y and Atlas_Z columns.

This uses the Brainglobe Atlas API to query the atlas. It does not use the
structure_from_coords() method; instead, it manually converts the coordinates into
stack indices, gets the corresponding annotation id and queries the corresponding
acronym -- because brainglobe-atlasapi is not vectorized at all.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame | DataFrame with atlas coordinates in microns. | required |
| atlas | BrainGlobeAtlas |  | required |
| col | str | Column in which to put the region acronyms. Default is "Parent". | 'Parent' |

Returns:

| Name | Type | Description |
|---|---|---|
| df | DataFrame | Same DataFrame with a new "Parent" column. |

Source: histoquant/utils.py
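A hedged sketch of the vectorized lookup described above (convert micron coordinates to stack indices, index the annotation volume, then map ids to acronyms). The mapping of the columns onto the atlas stack axes and the "root" fallback for out-of-brain ids are assumptions.

```python
# Sketch: vectorized region lookup with brainglobe-atlasapi.
import numpy as np
import pandas as pd
from brainglobe_atlasapi import BrainGlobeAtlas


def add_brain_region_sketch(df: pd.DataFrame, atlas: BrainGlobeAtlas,
                            col: str = "Parent") -> pd.DataFrame:
    coords_um = df[["Atlas_X", "Atlas_Y", "Atlas_Z"]].to_numpy()
    # microns -> stack indices (assumes the columns are ordered like the atlas axes)
    indices = (coords_um / np.array(atlas.resolution)).astype(int)
    ids = atlas.annotation[indices[:, 0], indices[:, 1], indices[:, 2]]
    id_to_acronym = dict(zip(atlas.lookup_df["id"], atlas.lookup_df["acronym"]))
    df[col] = [id_to_acronym.get(i, "root") for i in ids]  # fallback is an assumption
    return df
```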
add_channel(df, object_type, channel_names)

Add channel as a measurement for a detections DataFrame.

The channel is read from the Classification column, the latter having to be formatted
as "object_type: channel".

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame | DataFrame with detections measurements. | required |
| object_type | str | Object type (primary classification). | required |
| channel_names | dict | Map between original channel names and something else. | required |

Returns:

| Type | Description |
|---|---|
| DataFrame | Same DataFrame with a "channel" column. |

Source: histoquant/utils.py
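A minimal pandas sketch of the parsing rule stated above ("object_type: channel"); how channels missing from channel_names are handled is an assumption.

```python
# Sketch: derive a "channel" column from Classification == "object_type: channel".
import pandas as pd


def add_channel_sketch(df: pd.DataFrame, object_type: str,
                       channel_names: dict) -> pd.DataFrame:
    raw = df["Classification"].str.replace(f"{object_type}: ", "", regex=False)
    df["channel"] = raw.map(channel_names).fillna(raw)  # keep unmapped names as-is
    return df
```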
add_hemisphere(df, hemisphere_names, midline=5700, col='Atlas_Z', atlas_type='brain')

Add hemisphere (left/right) as a measurement for detections or annotations.

The hemisphere is read from the "Classification" column for annotations. The latter
needs to be in the form "Right: Name" or "Left: Name". For detections, the input col
of df is compared to midline to assess whether the object belongs to the left or right
hemisphere.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame | DataFrame with detections or annotations measurements. | required |
| hemisphere_names | dict | Map between "Left" and "Right" and something else. | required |
| midline | float | Used only for "detections". | 5700 |
| col | str | Name of the column containing the Z coordinate (medio-lateral) in microns. Default is "Atlas_Z". | 'Atlas_Z' |
| atlas_type | {"brain", "cord"} | Type of atlas used for registration. Required because the brain atlas is swapped between left and right while the spinal cord atlas is not. Default is "brain". | 'brain' |

Returns:

| Name | Type | Description |
|---|---|---|
| df | DataFrame | The same DataFrame with a new "hemisphere" column. |

Source: histoquant/utils.py
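For detections, the midline comparison amounts to something like the sketch below; which side of the midline maps to "Left" versus "Right" (and how atlas_type swaps it) is an assumption here, not taken from the source.

```python
# Sketch: assign hemisphere for detections by comparing a coordinate to the midline.
import numpy as np
import pandas as pd


def add_hemisphere_detections_sketch(df: pd.DataFrame, hemisphere_names: dict,
                                     midline: float = 5700,
                                     col: str = "Atlas_Z") -> pd.DataFrame:
    # side assignment is an assumption; it may need to be swapped depending on atlas_type
    left = df[col] <= midline
    df["hemisphere"] = np.where(left, hemisphere_names["Left"],
                                hemisphere_names["Right"])
    return df
```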
ccf_to_stereo(x_ccf, y_ccf, z_ccf=0)

Convert X, Y, Z coordinates in CCFv3 to stereotaxic coordinates (as in the
Paxinos-Franklin atlas).

Coordinates are shifted, rotated and squeezed, see (1) for more info. Input must be
in mm.
x_ccf corresponds to the antero-posterior (rostro-caudal) axis.
y_ccf corresponds to the dorso-ventral axis.
z_ccf corresponds to the medio-lateral (left-right) axis.

Warning: this is a rough estimation.

(1) https://community.brain-map.org/t/how-to-transform-ccf-x-y-z-coordinates-into-stereotactic-coordinates/1858

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| x_ccf | float or ndarray | Coordinates in CCFv3 space in mm. | required |
| y_ccf | float or ndarray | Coordinates in CCFv3 space in mm. | required |
| z_ccf | float or ndarray | Coordinate in CCFv3 space in mm. Default is 0. | 0 |

Returns:

| Type | Description |
|---|---|
| ap, dv, ml : float or ndarray | Stereotaxic coordinates in mm. |

Source: histoquant/utils.py
filter_df_classifications(df, filter_list, mode='keep', col='Classification')

Filter a DataFrame based on whether the specified col column entries contain elements
in filter_list. Case insensitive.

If mode is "keep", entries are kept only if their col is in the list (default).
If mode is "remove", entries are removed if their col is in the list.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame |  | required |
| filter_list | list, tuple or str | List of words that should be present to trigger the filter. | required |
| mode | {"keep", "remove"} | Keep or remove entries from the list. Default is "keep". | 'keep' |
| col | str | Key in df. Default is "Classification". | 'Classification' |

Returns:

| Type | Description |
|---|---|
| DataFrame | Filtered DataFrame. |

Source: histoquant/utils.py
filter_df_regions(df, filter_list, mode='keep', col='Parent')

Filter entries in df based on whether their col is in filter_list or not.

If mode is "keep", entries are kept only if their col is in the list (default).
If mode is "remove", entries are removed if their col is in the list.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame |  | required |
| filter_list | list-like | List of regions to keep or remove from the DataFrame. | required |
| mode | {"keep", "remove"} | Keep or remove entries from the list. Default is "keep". | 'keep' |
| col | str | Key in df. Default is "Parent". | 'Parent' |

Returns:

| Name | Type | Description |
|---|---|---|
| df | DataFrame | Filtered DataFrame. |

Source: histoquant/utils.py
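The keep/remove logic described above boils down to a pandas isin() mask; a minimal sketch:

```python
# Sketch: keep or remove rows whose `col` value is in `filter_list`.
import pandas as pd


def filter_df_regions_sketch(df: pd.DataFrame, filter_list, mode: str = "keep",
                             col: str = "Parent") -> pd.DataFrame:
    mask = df[col].isin(filter_list)
    return df[mask] if mode == "keep" else df[~mask]
```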
get_blacklist(file, atlas)

Build a list of regions to exclude from a file.

The file must be a TOML with [WITH_CHILDS] and [EXACT] sections.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| file | str | Full path to the atlas_blacklist.toml file. | required |
| atlas | BrainGlobeAtlas | Atlas to extract regions from. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| black_list | list | Full list of acronyms to discard. |

Source: histoquant/utils.py
get_data_coverage(df, col='Atlas_AP', by='animal')

Get the min and max in col for each by.

Used to get the data coverage for each animal, to plot in distributions.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame |  | required |
| col | str | Key in df. Default is "Atlas_AP". | 'Atlas_AP' |
| by | str | Key in df. Default is "animal". | 'animal' |

Returns:

| Type | Description |
|---|---|
| DataFrame | min and max of col for each by. |

Source: histoquant/utils.py
get_df_kind(df)

Get the DataFrame kind, e.g. Annotations or Detections.

It is based on reading the Object Type of the first entry, so the DataFrame must
contain only one kind of object.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame |  | required |

Returns:

| Name | Type | Description |
|---|---|---|
| kind | str | "detection" or "annotation". |

Source: histoquant/utils.py
get_injection_site(animal, info_file, channel, stereo=False)

Get the injection site coordinates associated with an animal.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| animal | str | Animal ID. | required |
| info_file | str | Path to the TOML info file. | required |
| channel | str | Channel ID as in the TOML file. | required |
| stereo | bool | Whether to convert the coordinates into stereotaxic coordinates. Default is False. | False |

Returns:

| Type | Description |
|---|---|
| x, y, z : floats | Injection site coordinates. |

Source: histoquant/utils.py
get_leaves_list(atlas)

Get the list of leaf brain regions.

Leaf brain regions are defined as regions without children, i.e. regions that are at
the bottom of the hierarchy.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| atlas | BrainGlobeAtlas | Atlas to extract regions from. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| leaves_list | list | Acronyms of leaf brain regions. |

Source: histoquant/utils.py
get_mapping_fusion(fusion_file)

Get the mapping dictionary between input brain regions and new regions defined in the
atlas_fusion.toml file.

The returned dictionary can be used in DataFrame.replace().

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| fusion_file | str | Path to the TOML file with the merging rules. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| m | dict | Mapping as {old: new}. |

Source: histoquant/utils.py
get_starter_cells(animal, channel, info_file)

Get the number of starter cells associated with an animal.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| animal | str | Animal ID. | required |
| channel | str | Channel ID. | required |
| info_file | str | Path to the TOML info file. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| n_starters | int | Number of starter cells. |

Source: histoquant/utils.py
merge_regions(df, col, fusion_file)

Merge brain regions following the rules in the fusion_file.toml file.

Apply this merging on col of the input DataFrame. col values found in the members
sections of the file will be changed to the new acronym.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame |  | required |
| col | str | Column of df on which to apply the merging. | required |
| fusion_file | str | Path to the TOML file with the merging rules. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| df | DataFrame | Same DataFrame with regions renamed. |

Source: histoquant/utils.py
renormalize_per_key(df, by, on)

Renormalize the on column by its sum for each by.

Use case: relative density is computed for both hemispheres, so if one wants to plot
only one hemisphere, the sum of the bars corresponding to one channel (by) should be
1. So:

    df = df[df["hemisphere"] == "Ipsi."]
    df = renormalize_per_key(df, "channel", "relative density")

Then, the sum of "relative density" for each "channel" equals 1.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame |  | required |
| by | str | Key in df. | required |
| on | str | Key in df. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| df | DataFrame | Same DataFrame with the on column normalized. |

Source: histoquant/utils.py
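The renormalization described above maps directly onto a pandas groupby/transform; a minimal sketch:

```python
# Sketch: divide `on` by its per-group (`by`) sum so each group sums to 1.
import pandas as pd


def renormalize_per_key_sketch(df: pd.DataFrame, by: str, on: str) -> pd.DataFrame:
    df = df.copy()
    df[on] = df[on] / df.groupby(by)[on].transform("sum")
    return df
```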
select_hemisphere_channel(df, hue, hue_filter, hue_mirror)

Select relevant data given hue and filters.

Returns the DataFrame with only the things to be used.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| df | DataFrame | DataFrame to filter. | required |
| hue | {"hemisphere", "channel"} | Hue that will be used in seaborn plots. | "hemisphere" |
| hue_filter | str | Selected data. | required |
| hue_mirror | bool | Instead of keeping only hue_filter values, they will be plotted in mirror. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| dfplt | DataFrame | DataFrame to be used in plots. |

Source: histoquant/utils.py