
Commit

…maging into dev_widget
kushalbakshi committed Apr 9, 2024
2 parents 8671a62 + 3b420a7 · commit 7662b57
Showing 16 changed files with 378 additions and 234 deletions.
8 changes: 0 additions & 8 deletions .github/workflows/release.yaml
@@ -4,14 +4,6 @@ on:
jobs:
make_github_release:
uses: datajoint/.github/.github/workflows/make_github_release.yaml@main
pypi_release:
needs: make_github_release
uses: datajoint/.github/.github/workflows/pypi_release.yaml@main
secrets:
TWINE_USERNAME: ${{secrets.TWINE_USERNAME}}
TWINE_PASSWORD: ${{secrets.TWINE_PASSWORD}}
with:
UPLOAD_URL: ${{needs.make_github_release.outputs.release_upload_url}}
mkdocs_release:
uses: datajoint/.github/.github/workflows/mkdocs_release.yaml@main
permissions:
13 changes: 13 additions & 0 deletions CHANGELOG.md
@@ -3,6 +3,19 @@
Observes [Semantic Versioning](https://semver.org/spec/v2.0.0.html) standard and
[Keep a Changelog](https://keepachangelog.com/en/1.0.0/) convention.

## [0.9.5] - 2024-03-22

+ Add - pytest
+ Fix - minor bugfix in tests
+ Update - clean up notebook
+ Update - Elements installed directly from GitHub instead of PyPI
+ Update - Black formatting `tests`

## [0.9.4] - 2024-02-05

+ Fix - bugfix ingesting field xyz for mesoscan (multiROI) from ScanImage
+ Update - make `output_dir` if not exist for `task_mode="trigger"`

## [0.9.3] - 2024-01-29

+ Update - DataJoint Elements to install from GitHub instead of PyPI
17 changes: 0 additions & 17 deletions element_calcium_imaging/__init__.py
@@ -1,17 +0,0 @@
import os
import datajoint as dj

if "custom" not in dj.config:
dj.config["custom"] = {}

# overwrite dj.config['custom'] values with environment variables if available

dj.config["custom"]["database.prefix"] = os.getenv(
"DATABASE_PREFIX", dj.config["custom"].get("database.prefix", "")
)

dj.config["custom"]["imaging_root_data_dir"] = os.getenv(
"IMAGING_ROOT_DATA_DIR", dj.config["custom"].get("imaging_root_data_dir", "")
)

db_prefix = dj.config["custom"].get("database.prefix", "")
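With this module-level auto-configuration removed, a downstream pipeline presumably sets these keys itself (or keeps reading the environment variables on its own). A minimal sketch with hypothetical values:

```python
import datajoint as dj

# Minimal sketch (assumed usage): set the keys that the deleted __init__.py used
# to populate from the DATABASE_PREFIX and IMAGING_ROOT_DATA_DIR environment
# variables. The prefix and path below are hypothetical.
if "custom" not in dj.config:
    dj.config["custom"] = {}
dj.config["custom"]["database.prefix"] = "neuro_"
dj.config["custom"]["imaging_root_data_dir"] = "/data/imaging"
```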
19 changes: 13 additions & 6 deletions element_calcium_imaging/imaging.py
@@ -254,13 +254,9 @@ def infer_output_dir(cls, key, relative=False, mkdir=False):
e.g.: sub4/sess1/scan0/suite2p_0
"""
acq_software = (scan.Scan & key).fetch1("acq_software")
filetypes = dict(
ScanImage="*.tif", Scanbox="*.sbx", NIS="*.nd2", PrairieView="*.tif"
)

scan_dir = find_full_path(
get_imaging_root_data_dir(),
get_calcium_imaging_files(key, filetypes[acq_software])[0],
get_calcium_imaging_files(key, acq_software)[0],
).parent
root_dir = find_root_directory(get_imaging_root_data_dir(), scan_dir)
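The caller no longer passes a glob pattern; `get_calcium_imaging_files` (a function the host pipeline provides, e.g. in `tests/tutorial_pipeline.py`) is presumably expected to map `acq_software` to file extensions itself. A hypothetical sketch of such a helper, reusing the mapping removed above; the key fields and directory layout are assumptions:

```python
from pathlib import Path

# Hypothetical helper with the new signature; the extension mapping mirrors the
# dict removed from infer_output_dir above.
ACQ_GLOBS = {"ScanImage": "*.tif", "Scanbox": "*.sbx", "NIS": "*.nd2", "PrairieView": "*.tif"}

def get_calcium_imaging_files(scan_key, acq_software):
    """Return the raw acquisition files for one scan (illustrative sketch only)."""
    session_dir = (
        Path("/data/imaging")                    # assumed root directory
        / scan_key["subject"]                    # assumed key fields and layout
        / f"session{scan_key['session_id']}"
        / f"scan{scan_key['scan_id']}"
    )
    files = sorted(session_dir.rglob(ACQ_GLOBS[acq_software]))
    if not files:
        raise FileNotFoundError(f"No {acq_software} files found under {session_dir}")
    return files
```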

@@ -376,7 +372,18 @@ def make(self, key):
ProcessingTask.update1(
{**key, "processing_output_dir": output_dir.as_posix()}
)
output_dir = find_full_path(get_imaging_root_data_dir(), output_dir).as_posix()

try:
output_dir = find_full_path(
get_imaging_root_data_dir(), output_dir
).as_posix()
except FileNotFoundError as e:
if task_mode == "trigger":
processed_dir = pathlib.Path(get_processed_root_data_dir())
output_dir = processed_dir / output_dir
output_dir.mkdir(parents=True, exist_ok=True)
else:
raise e

if task_mode == "load":
method, imaging_dataset = get_loader_result(key, ProcessingTask)
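The new fallback relies on `get_processed_root_data_dir()`, provided by the host pipeline alongside `get_imaging_root_data_dir()`. A hypothetical definition, assuming the path lives in `dj.config`:

```python
import pathlib
import datajoint as dj

def get_processed_root_data_dir():
    """Hypothetical implementation: root under which triggered analyses write their output."""
    processed_dir = dj.config.get("custom", {}).get(
        "imaging_processed_data_dir",  # assumed config key
        "/data/imaging/processed",     # assumed default
    )
    return pathlib.Path(processed_dir)
```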
24 changes: 16 additions & 8 deletions element_calcium_imaging/imaging_no_curation.py
@@ -254,13 +254,9 @@ def infer_output_dir(cls, key, relative=False, mkdir=False):
e.g.: sub4/sess1/scan0/suite2p_0
"""
acq_software = (scan.Scan & key).fetch1("acq_software")
filetypes = dict(
ScanImage="*.tif", Scanbox="*.sbx", NIS="*.nd2", PrairieView="*.tif"
)

scan_dir = find_full_path(
get_imaging_root_data_dir(),
get_calcium_imaging_files(key, filetypes[acq_software])[0],
get_calcium_imaging_files(key, acq_software)[0],
).parent
root_dir = find_root_directory(get_imaging_root_data_dir(), scan_dir)

@@ -369,14 +365,26 @@ def make(self, key):
task_mode, output_dir = (ProcessingTask & key).fetch1(
"task_mode", "processing_output_dir"
)
acq_software = (scan.Scan & key).fetch1("acq_software")

if not output_dir:
output_dir = ProcessingTask.infer_output_dir(key, relative=True, mkdir=True)
# update processing_output_dir
ProcessingTask.update1(
{**key, "processing_output_dir": output_dir.as_posix()}
)
output_dir = find_full_path(get_imaging_root_data_dir(), output_dir).as_posix()

try:
output_dir = find_full_path(
get_imaging_root_data_dir(), output_dir
).as_posix()
except FileNotFoundError as e:
if task_mode == "trigger":
processed_dir = pathlib.Path(get_processed_root_data_dir())
output_dir = processed_dir / output_dir
output_dir.mkdir(parents=True, exist_ok=True)
else:
raise e

if task_mode == "load":
method, imaging_dataset = get_loader_result(key, ProcessingTask)
@@ -452,8 +460,8 @@ def make(self, key):
"Caiman pipeline is not yet capable of analyzing 3D scans."
)

# handle multi-channel tiff image before running CaImAn
if nchannels > 1:
if acq_software == "ScanImage" and nchannels > 1:
# handle multi-channel tiff image before running CaImAn
channel_idx = caiman_params.get("channel_to_process", 0)
tmp_dir = pathlib.Path(output_dir) / "channel_separated_tif"
tmp_dir.mkdir(exist_ok=True)
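The channel separation step is now limited to ScanImage acquisitions. The element's own implementation is truncated above; as an illustration only, splitting an interleaved multi-channel tiff into a single-channel copy could look like the sketch below (assuming channels are interleaved along the page axis, the usual ScanImage layout):

```python
import pathlib
import tifffile

def split_scanimage_channel(tif_path, nchannels, channel_idx, out_dir):
    """Write a single-channel copy of an interleaved multi-channel ScanImage tiff.

    Illustrative sketch only, not the element's code; assumes pages are ordered
    frame0_ch0, frame0_ch1, ..., frame1_ch0, frame1_ch1, ...
    """
    out_dir = pathlib.Path(out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    stack = tifffile.imread(tif_path)                # (nframes * nchannels, height, width)
    single_channel = stack[channel_idx::nchannels]   # keep every nth page
    out_file = out_dir / f"{pathlib.Path(tif_path).stem}_chn{channel_idx}.tif"
    tifffile.imwrite(out_file, single_channel)
    return out_file
```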
19 changes: 13 additions & 6 deletions element_calcium_imaging/imaging_preprocess.py
@@ -444,13 +444,9 @@ def infer_output_dir(cls, key, relative=False, mkdir=False):
e.g.: sub4/sess1/scan0/suite2p_0
"""
acq_software = (scan.Scan & key).fetch1("acq_software")
filetypes = dict(
ScanImage="*.tif", Scanbox="*.sbx", NIS="*.nd2", PrairieView="*.tif"
)

scan_dir = find_full_path(
get_imaging_root_data_dir(),
get_calcium_imaging_files(key, filetypes[acq_software])[0],
get_calcium_imaging_files(key, acq_software)[0],
).parent
root_dir = find_root_directory(get_imaging_root_data_dir(), scan_dir)

@@ -566,7 +562,18 @@ def make(self, key):
ProcessingTask.update1(
{**key, "processing_output_dir": output_dir.as_posix()}
)
output_dir = find_full_path(get_imaging_root_data_dir(), output_dir).as_posix()

try:
output_dir = find_full_path(
get_imaging_root_data_dir(), output_dir
).as_posix()
except FileNotFoundError as e:
if task_mode == "trigger":
processed_dir = pathlib.Path(get_processed_root_data_dir())
output_dir = processed_dir / output_dir
output_dir.mkdir(parents=True, exist_ok=True)
else:
raise e

if task_mode == "load":
method, imaging_dataset = get_loader_result(key, ProcessingTask)
74 changes: 34 additions & 40 deletions element_calcium_imaging/scan.py
@@ -201,7 +201,7 @@ class ScanInfo(dj.Imported):
nfields (int): Number of fields.
nchannels (int): Number of channels.
ndepths (int): Number of scanning depths (planes).
nframes (int): Number of recorded frames.
nframes (int): Number of recorded frames (time steps).
nrois (int): Number of ROIs (see scanimage's multi ROI imaging).
x (float, optional): ScanImage's 0 point in the motor coordinate system (um).
y (float, optional): ScanImage's 0 point in the motor coordinate system (um).
@@ -222,7 +222,7 @@ class ScanInfo(dj.Imported):
nfields : tinyint # number of fields
nchannels : tinyint # number of channels
ndepths : int # Number of scanning depths (planes)
nframes : int # number of recorded frames
nframes : int # number of recorded frames (time steps)
nrois : tinyint # number of ROIs (see scanimage's multi ROI imaging)
x=null : float # (um) ScanImage's 0 point in the motor coordinate system
y=null : float # (um) ScanImage's 0 point in the motor coordinate system
@@ -296,59 +296,53 @@ def make(self, key):
import scanreader

# Read the scan
scan = scanreader.read_scan(scan_filepaths)
scan_ = scanreader.read_scan(scan_filepaths)

# Insert in ScanInfo
x_zero, y_zero, z_zero = scan.motor_position_at_zero or (None, None, None)
x_zero, y_zero, z_zero = scan_.motor_position_at_zero or (None, None, None)

self.insert1(
dict(
key,
nfields=scan.num_fields,
nchannels=scan.num_channels,
nframes=scan.num_frames,
ndepths=scan.num_scanning_depths,
nfields=scan_.num_fields,
nchannels=scan_.num_channels,
nframes=scan_.num_frames,
ndepths=scan_.num_scanning_depths,
x=x_zero,
y=y_zero,
z=z_zero,
fps=scan.fps,
bidirectional=scan.is_bidirectional,
usecs_per_line=scan.seconds_per_line * 1e6,
fill_fraction=scan.temporal_fill_fraction,
nrois=scan.num_rois if scan.is_multiROI else 0,
scan_duration=scan.num_frames / scan.fps,
fps=scan_.fps,
bidirectional=scan_.is_bidirectional,
usecs_per_line=scan_.seconds_per_line * 1e6,
fill_fraction=scan_.temporal_fill_fraction,
nrois=scan_.num_rois if scan_.is_multiROI else 0,
scan_duration=scan_.num_frames / scan_.fps,
)
)
# Insert Field(s)
if scan.is_multiROI:
if scan_.is_multiROI:
self.Field.insert(
[
dict(
key,
field_idx=field_id,
px_height=scan.field_heights[field_id],
px_width=scan.field_widths[field_id],
um_height=scan.field_heights_in_microns[field_id],
um_width=scan.field_widths_in_microns[field_id],
px_height=scan_.field_heights[field_id],
px_width=scan_.field_widths[field_id],
um_height=scan_.field_heights_in_microns[field_id],
um_width=scan_.field_widths_in_microns[field_id],
field_x=(
x_zero
+ scan._degrees_to_microns(scan.fields[field_id].x)
if x_zero
else None
(x_zero or 0)
+ scan_._degrees_to_microns(scan_.fields[field_id].x)
),
field_y=(
y_zero
+ scan._degrees_to_microns(scan.fields[field_id].y)
if y_zero
else None
),
field_z=(
z_zero + scan.fields[field_id].depth if z_zero else None
(y_zero or 0)
+ scan_._degrees_to_microns(scan_.fields[field_id].y)
),
delay_image=scan.field_offsets[field_id],
roi=scan.field_rois[field_id][0],
field_z=((z_zero or 0) + scan_.fields[field_id].depth),
delay_image=scan_.field_offsets[field_id],
roi=scan_.field_rois[field_id][0],
)
for field_id in range(scan.num_fields)
for field_id in range(scan_.num_fields)
]
)
else:
@@ -357,20 +351,20 @@ def make(self, key):
dict(
key,
field_idx=plane_idx,
px_height=scan.image_height,
px_width=scan.image_width,
um_height=getattr(scan, "image_height_in_microns", None),
um_width=getattr(scan, "image_width_in_microns", None),
px_height=scan_.image_height,
px_width=scan_.image_width,
um_height=getattr(scan_, "image_height_in_microns", None),
um_width=getattr(scan_, "image_width_in_microns", None),
field_x=x_zero if x_zero else None,
field_y=y_zero if y_zero else None,
field_z=(
z_zero + scan.scanning_depths[plane_idx]
z_zero + scan_.scanning_depths[plane_idx]
if z_zero
else None
),
delay_image=scan.field_offsets[plane_idx],
delay_image=scan_.field_offsets[plane_idx],
)
for plane_idx in range(scan.num_scanning_depths)
for plane_idx in range(scan_.num_scanning_depths)
]
)
elif acq_software == "Scanbox":
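Renaming `scan` to `scan_` avoids shadowing the imported `scan` schema module; the metadata still comes from `scanreader`. A minimal usage sketch with a hypothetical file path, using only attributes that appear above:

```python
import scanreader

# Hypothetical file path; the attributes below are the ones used to fill ScanInfo.
scan_ = scanreader.read_scan("/data/imaging/sub4/sess1/scan0/scan_00001.tif")

print(scan_.num_fields, scan_.num_channels, scan_.num_scanning_depths)
print(f"{scan_.num_frames} frames at {scan_.fps:.2f} Hz "
      f"({scan_.num_frames / scan_.fps:.1f} s total)")
if scan_.is_multiROI:
    print(f"multiROI scan with {scan_.num_rois} ROIs")
```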
2 changes: 1 addition & 1 deletion element_calcium_imaging/version.py
@@ -1,3 +1,3 @@
"""Package metadata."""

__version__ = "0.9.3"
__version__ = "0.9.5"
2 changes: 1 addition & 1 deletion notebooks/demo.ipynb
@@ -49,7 +49,7 @@
}
],
"source": [
"from tutorial_pipeline import *"
"from tests.tutorial_pipeline import *"
]
},
{
2 changes: 1 addition & 1 deletion notebooks/quality_metrics.ipynb
@@ -35,7 +35,7 @@
"import datetime\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"from tutorial_pipeline import scan, imaging"
"from tests.tutorial_pipeline import scan, imaging"
]
},
{