diff --git a/.conda_mac/meta.yaml b/.conda_mac/meta.yaml index 173c412df..cf46e60e6 100644 --- a/.conda_mac/meta.yaml +++ b/.conda_mac/meta.yaml @@ -58,6 +58,7 @@ requirements: - conda-forge::qudida - conda-forge::albumentations >=1.4.15 # Handle Nan keypoints - conda-forge::ndx-pose <0.2.0 + - conda-forge::lazy_loader run: - conda-forge::python >=3.10.0,<3.11.0 @@ -89,3 +90,4 @@ requirements: - conda-forge::qudida - conda-forge::albumentations >=1.4.15 # Handle Nan keypoints - conda-forge::ndx-pose <0.2.0 + - conda-forge::lazy_loader diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 108c2a65e..db05d2714 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -28,7 +28,7 @@ Please include information about how you installed. - OS: - Version(s): - + - SLEAP installation method (listed [here](https://sleap.ai/installation.html#)): - [ ] [Conda from package](https://sleap.ai/installation.html#conda-package) - [ ] [Conda from source](https://sleap.ai/installation.html#conda-from-source) diff --git a/.github/workflows/build_conda_ci.yml b/.github/workflows/build_conda_ci.yml index 0d5980730..d5503e296 100644 --- a/.github/workflows/build_conda_ci.yml +++ b/.github/workflows/build_conda_ci.yml @@ -11,7 +11,7 @@ on: - "requirements.txt" - "dev_requirements.txt" - "environment_build.yml" - - ".github/workflows/build_conda_ci.yml" + - ".github/workflows/build_conda_ci.yml" # Run # If RUN_BUILD_JOB is set to true, then RUN_ID will be overwritten to the current run id env: diff --git a/.github/workflows/build_pypi_ci.yml b/.github/workflows/build_pypi_ci.yml index 7621c2b5b..4c8630b30 100644 --- a/.github/workflows/build_pypi_ci.yml +++ b/.github/workflows/build_pypi_ci.yml @@ -11,7 +11,7 @@ on: - "jupyter_requirements.txt" - "pypi_requirements.txt" - "environment_build.yml" - - ".github/workflows/build_pypi_ci.yml" + - ".github/workflows/build_pypi_ci.yml" # Run jobs: build: diff --git a/.github/workflows/website.yml b/.github/workflows/website.yml index 3e4008ea9..baedc1e1c 100644 --- a/.github/workflows/website.yml +++ b/.github/workflows/website.yml @@ -8,7 +8,7 @@ on: # 'main' triggers updates to 'sleap.ai', all others to 'sleap.ai/develop' - main - develop - - liezl/remove-python-version-from-website-build-yml + - liezl/bump-to-1.4.1a3-py310 paths: - "docs/**" - "README.rst" diff --git a/.gitignore b/.gitignore index 8db0e1f74..b5333c766 100644 --- a/.gitignore +++ b/.gitignore @@ -117,3 +117,5 @@ venv.bak/ # OS generated files .DS_Store + +models/ \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index a429c7928..466f8d2b1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -28,7 +28,7 @@ copyright = f"2019–{date.today().year}, Talmo Lab" # The short X.Y version -version = "1.4.1a2" +version = "1.4.1a3" # Get the sleap version # with open("../sleap/version.py") as f: @@ -36,7 +36,7 @@ # version = re.search("\d.+(?=['\"])", version_file).group(0) # Release should be the full branch name -release = "v1.4.1a2" +release = "v1.4.1a3" html_title = f"SLEAP ({release})" html_short_title = "SLEAP" diff --git a/docs/installation.md b/docs/installation.md index 4799a0893..5c38f5d3e 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -5,12 +5,12 @@ SLEAP can be installed as a Python package on Windows, Linux, and Mac OS. 
For qu
 ````{tabs}
 ```{group-tab} Windows and Linux
 ```bash
-    conda create -y -n sleap -c conda-forge -c nvidia -c sleap -c anaconda sleap=1.4.1a2
+    conda create -y -n sleap -c conda-forge -c nvidia -c sleap -c anaconda sleap=1.4.1a3
 ```
 ```
 ```{group-tab} Mac OS
 ```bash
-    conda create -y -n sleap -c conda-forge -c anaconda -c sleap sleap=1.4.1a2
+    conda create -y -n sleap -c conda-forge -c anaconda -c sleap sleap=1.4.1a3
 ```
 ```
 ````
@@ -147,7 +147,7 @@ SLEAP can be installed three different ways: via {ref}`conda package
diff --git a/environment.yml b/environment.yml
--- a/environment.yml
+++ b/environment.yml
 - conda-forge::albumentations >=1.4.15 # Handle Nan keypoints
 - conda-forge::ndx-pose <0.2.0
+ - conda-forge::lazy_loader

 # Packages required by tensorflow to find/use GPUs
 - conda-forge::cudatoolkit ==11.3.1
diff --git a/environment_mac.yml b/environment_mac.yml
index d73b645dc..131af52a2 100644
--- a/environment_mac.yml
+++ b/environment_mac.yml
@@ -40,5 +40,6 @@ dependencies:
 - conda-forge::qudida
 - conda-forge::albumentations >=1.4.15 # Handle Nan keypoints
 - conda-forge::ndx-pose <0.2.0
+ - conda-forge::lazy_loader
 - pip:
-      - "--editable=.[conda_dev]"
+      - "--editable=.[conda_dev]"
diff --git a/environment_no_cuda.yml b/environment_no_cuda.yml
index b5c9bea47..3c096460d 100644
--- a/environment_no_cuda.yml
+++ b/environment_no_cuda.yml
@@ -42,6 +42,6 @@ dependencies:
 - conda-forge::qudida
 - conda-forge::albumentations >=1.4.15 # Handle Nan keypoints
 - conda-forge::ndx-pose <0.2.0
-
+ - conda-forge::lazy_loader
 - pip:
     - "--editable=.[conda_dev]"
diff --git a/pypi_requirements.txt b/pypi_requirements.txt
index 53393e700..0a00658d0 100644
--- a/pypi_requirements.txt
+++ b/pypi_requirements.txt
@@ -28,13 +28,15 @@ scipy>=1.7.0
 scikit-image
 scikit-learn>=1.0.0
 seaborn
-tensorflow==2.9.2; platform_machine != 'arm64'
-tensorflow-hub
-albumentations>=1.4.15 # Handle Nan keypoints
+albumentations
 ndx-pose<0.2.0
+lazy-loader
+tensorflow==2.9.2; platform_machine != 'arm64'
+tensorflow-hub; platform_machine != 'arm64'
 # Silicon Mac specific packages
+tensorflow-hub >=0.14.0,<=0.16.1; sys_platform == 'darwin' and platform_machine == 'arm64'
 tensorflow-macos >=2.10.0,<2.13.0; sys_platform == 'darwin' and platform_machine == 'arm64'
-tensorflow-metal; sys_platform == 'darwin' and platform_machine == 'arm64'
+tensorflow-metal >=0.8.0,<=1.1.0; sys_platform == 'darwin' and platform_machine == 'arm64'
 # Dependencies of dependencies
 # tensorboard 2.11.2 has requirement protobuf<4,>=3.9.2
diff --git a/sleap/__init__.py b/sleap/__init__.py
index 7e506b10a..6471facf8 100644
--- a/sleap/__init__.py
+++ b/sleap/__init__.py
@@ -1,16 +1,21 @@
 import logging
 import sys
+import os

+# Disable albumentations update check before imports.
+os.environ["NO_ALBUMENTATIONS_UPDATE"] = "1"
 # Setup logging to stdout
 logging.basicConfig(stream=sys.stdout, level=logging.INFO)

 # Import submodules we want available at top-level
 from sleap.version import __version__, versions
-from sleap.io.dataset import Labels, load_file
+
 from sleap.io.video import Video, load_video
 from sleap.instance import LabeledFrame, Instance, PredictedInstance, Track
 from sleap.skeleton import Skeleton
+from sleap.io.dataset import Labels, load_file
+
 import sleap.nn
 from sleap.nn.data import pipelines
 from sleap.nn import inference
diff --git a/sleap/gui/app.py b/sleap/gui/app.py
index 6bc5e1d53..94af77fd5 100644
--- a/sleap/gui/app.py
+++ b/sleap/gui/app.py
@@ -230,9 +230,11 @@ def closeEvent(self, event):
         # Save preferences.
prefs.save() + will_accept = False + if not self.state["has_changes"]: # No unsaved changes, so accept event (close) - event.accept() + will_accept = True else: msgBox = QMessageBox() msgBox.setText("Do you want to save the changes to this project?") @@ -249,12 +251,16 @@ def closeEvent(self, event): event.ignore() elif ret_val == QMessageBox.Discard: # don't save, just close - event.accept() + will_accept = True elif ret_val == QMessageBox.Save: # save self.commands.saveProject() # accept event (closes window) - event.accept() + will_accept = True + + if will_accept: + self.player.cleanup() + event.accept() def dragEnterEvent(self, event): # TODO: Parse filenames and accept only if valid ext (or folder) diff --git a/sleap/gui/commands.py b/sleap/gui/commands.py index dfc0dbad8..3b2fb1aec 100644 --- a/sleap/gui/commands.py +++ b/sleap/gui/commands.py @@ -39,7 +39,6 @@ class which inherits from `AppCommand` (or a more specialized class such as from typing import Callable, Dict, Iterator, List, Optional, Tuple, Type, Union, cast import attr -import cv2 import numpy as np from qtpy import QtCore, QtGui, QtWidgets @@ -1794,8 +1793,7 @@ def ask(context: CommandContext, params: dict) -> bool: """Shows gui for replacing videos in project.""" def _get_truncation_message(truncation_messages, path, video): - reader = cv2.VideoCapture(path) - last_vid_frame = int(reader.get(cv2.CAP_PROP_FRAME_COUNT)) + last_vid_frame = len(Video.from_filename(path)) lfs: List[LabeledFrame] = list(context.labels.get(video)) if lfs is not None: lfs.sort(key=lambda lf: lf.frame_idx) diff --git a/sleap/gui/dialogs/importvideos.py b/sleap/gui/dialogs/importvideos.py index 005b75082..2d1dcfa1c 100644 --- a/sleap/gui/dialogs/importvideos.py +++ b/sleap/gui/dialogs/importvideos.py @@ -45,7 +45,9 @@ import h5py import qimage2ndarray -import cv2 +import lazy_loader + +cv2 = lazy_loader.load("cv2") from typing import Any, Dict, List, Optional diff --git a/sleap/gui/dialogs/missingfiles.py b/sleap/gui/dialogs/missingfiles.py index 0451e09f9..46f7b1b2f 100644 --- a/sleap/gui/dialogs/missingfiles.py +++ b/sleap/gui/dialogs/missingfiles.py @@ -1,14 +1,10 @@ """ Gui for prompting the user to locate one or more missing files. """ - import os - from pathlib import Path, PurePath from typing import Callable, List - from qtpy import QtWidgets, QtCore, QtGui - from sleap.io import pathutils from sleap.gui.dialogs.filedialog import FileDialog diff --git a/sleap/gui/widgets/video.py b/sleap/gui/widgets/video.py index 949703020..0fc98e6dc 100644 --- a/sleap/gui/widgets/video.py +++ b/sleap/gui/widgets/video.py @@ -116,11 +116,21 @@ def __init__(self, *args, **kwargs): # event to event queue from the request handler. 
self.process.connect(self.doProcessing) + # Defer timer creation to worker thread construction time + self.timer = None + + @QtCore.Slot() + def start_timers(self): # Start timer which will trigger processing events every 20 ms when we're free - self.timer = QtCore.QTimer() + self.timer = QtCore.QTimer(self) self.timer.timeout.connect(self.doProcessing) self.timer.start(20) + @QtCore.Slot() + def stop_timers(self): + if self.timer: + self.timer.stop() + def doProcessing(self): self._last_process_time = time.time() @@ -211,7 +221,7 @@ def __init__( *args, **kwargs, ): - super(QtVideoPlayer, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.setAcceptDrops(True) @@ -257,11 +267,12 @@ def __init__( self._loader_thread = QtCore.QThread() self._video_image_loader = LoadImageWorker() self._video_image_loader.moveToThread(self._loader_thread) + self._loader_thread.started.connect(self._video_image_loader.start_timers) self._loader_thread.start() # Connect signal so that image will be shown after it's loaded self._video_image_loader.result.connect( - lambda qimage: self.view.setImage(qimage) + self.on_new_frame, QtCore.Qt.QueuedConnection ) def update_selection_state(a, b): @@ -282,16 +293,27 @@ def update_selection_state(a, b): self.view.show() - # Call cleanup method when application exits to end worker thread - self.destroyed.connect(self.cleanup) - atexit.register(self.cleanup) + # Call cleanup method when application exits to end worker thread. + # Note: This is commented out in favor of the MainWindow.closeEvent() path. + # self.destroyed.connect(self.cleanup) + # app = QApplication.instance() + # if app: + # app.aboutToQuit.connect(self.cleanup) if video is not None: self.load_video(video) + def on_new_frame(self, qimage): + self.view.setImage(qimage) + def cleanup(self): - self._loader_thread.quit() - self._loader_thread.wait() + if self._loader_thread.isRunning(): + QtCore.QMetaObject.invokeMethod( + self._video_image_loader, "stop_timers", QtCore.Qt.QueuedConnection + ) + QtWidgets.QApplication.processEvents() + self._loader_thread.quit() + self._loader_thread.wait() def dragEnterEvent(self, event): if self.parentWidget(): diff --git a/sleap/info/feature_suggestions.py b/sleap/info/feature_suggestions.py index a5f773fa7..747ba0279 100644 --- a/sleap/info/feature_suggestions.py +++ b/sleap/info/feature_suggestions.py @@ -12,8 +12,6 @@ from time import time from typing import Dict, List, Optional, Tuple -import cv2 - from sklearn.decomposition import PCA from sklearn.cluster import KMeans @@ -22,6 +20,9 @@ from skimage.util.shape import view_as_windows from sleap.io.video import Video +import lazy_loader + +cv2 = lazy_loader.load("cv2") logger = logging.getLogger(__name__) diff --git a/sleap/io/dataset.py b/sleap/io/dataset.py index 1b894089f..3f7db49c2 100644 --- a/sleap/io/dataset.py +++ b/sleap/io/dataset.py @@ -76,14 +76,13 @@ make_instance_cattr, PredictedInstance, ) - from sleap.io import pathutils from sleap.io.video import Video, ImgStoreVideo, HDF5Video from sleap.gui.suggestions import SuggestionFrame -from sleap.gui.dialogs.missingfiles import MissingFilesDialog from sleap.rangelist import RangeList from sleap.util import uniquify, json_dumps + """ The version number to put in the Labels JSON format. 
""" @@ -2657,6 +2656,8 @@ def video_callback( context["changed_on_load"] = True if use_gui: + from sleap.gui.dialogs.missingfiles import MissingFilesDialog + # If there are still missing paths, prompt user if sum(missing): # If we are using dummy for any video not found by user diff --git a/sleap/io/video.py b/sleap/io/video.py index c8272cfbd..5f9456c93 100644 --- a/sleap/io/video.py +++ b/sleap/io/video.py @@ -4,8 +4,6 @@ import shutil import h5py as h5 -import cv2 -import imgstore import numpy as np import attr import cattr @@ -15,6 +13,10 @@ from typing import Iterable, List, Optional, Tuple, Union, Text from sleap.util import json_loads, json_dumps +import lazy_loader + +cv2 = lazy_loader.load("cv2") +imgstore = lazy_loader.load("imgstore") logger = logging.getLogger(__name__) diff --git a/sleap/io/videowriter.py b/sleap/io/videowriter.py index cd710c9d5..99db70bb1 100644 --- a/sleap/io/videowriter.py +++ b/sleap/io/videowriter.py @@ -10,9 +10,11 @@ """ from abc import ABC, abstractmethod -import cv2 import numpy as np import imageio.v2 as iio +import lazy_loader + +cv2 = lazy_loader.load("cv2") class VideoWriter(ABC): diff --git a/sleap/io/visuals.py b/sleap/io/visuals.py index f2dde0be3..7e3f3f2c8 100644 --- a/sleap/io/visuals.py +++ b/sleap/io/visuals.py @@ -7,19 +7,18 @@ from sleap.io.dataset import Labels from sleap.gui.color import ColorManager from sleap.util import usable_cpu_count - -import cv2 import os import numpy as np import math from collections import deque from time import perf_counter from typing import List, Optional, Tuple - from queue import Queue from threading import Thread - import logging +import lazy_loader + +cv2 = lazy_loader.load("cv2") logger = logging.getLogger(__name__) diff --git a/sleap/nn/callbacks.py b/sleap/nn/callbacks.py index ed420408e..9fa3ec01e 100644 --- a/sleap/nn/callbacks.py +++ b/sleap/nn/callbacks.py @@ -4,6 +4,7 @@ import logging import numpy as np import tensorflow as tf +import qtpy import zmq import io import os diff --git a/sleap/nn/data/confidence_maps.py b/sleap/nn/data/confidence_maps.py index 5895d42c6..d9f6b2c44 100644 --- a/sleap/nn/data/confidence_maps.py +++ b/sleap/nn/data/confidence_maps.py @@ -310,7 +310,7 @@ def generate_multi_confmaps(example): # Map transformation. output_ds = input_ds.map( - generate_multi_confmaps, num_parallel_calls=tf.data.experimental.AUTOTUNE + generate_multi_confmaps, num_parallel_calls=tf.data.AUTOTUNE ) return output_ds @@ -450,9 +450,7 @@ def generate_confmaps(example): return example # Map transformation. - output_ds = input_ds.map( - generate_confmaps, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(generate_confmaps, num_parallel_calls=tf.data.AUTOTUNE) return output_ds @@ -550,7 +548,5 @@ def generate_confmaps(example): return example # Map transformation. - output_ds = input_ds.map( - generate_confmaps, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(generate_confmaps, num_parallel_calls=tf.data.AUTOTUNE) return output_ds diff --git a/sleap/nn/data/dataset_ops.py b/sleap/nn/data/dataset_ops.py index 6c46fae38..e1f1ad55e 100644 --- a/sleap/nn/data/dataset_ops.py +++ b/sleap/nn/data/dataset_ops.py @@ -138,21 +138,24 @@ def expand(example): return example # Ensure that all keys have a rank of at least 1 (i.e., scalars). - ds_output = ds_input.map( - expand, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + ds_output = ds_input.map(expand, num_parallel_calls=tf.data.AUTOTUNE) # Batch elements as ragged tensors. 
- ds_output = ds_output.apply( - tf.data.experimental.dense_to_ragged_batch( + if hasattr(ds_output, "ragged_batch"): + ds_output = ds_output.ragged_batch( batch_size=self.batch_size, drop_remainder=self.drop_remainder ) - ) + else: + ds_output = ds_output.apply( + tf.data.experimental.dense_to_ragged_batch( + batch_size=self.batch_size, drop_remainder=self.drop_remainder + ) + ) if self.unrag: # Convert elements back into dense tensors with padding. ds_output = ds_output.map( - unrag_example, num_parallel_calls=tf.data.experimental.AUTOTUNE + unrag_example, num_parallel_calls=tf.data.AUTOTUNE ) return ds_output @@ -238,12 +241,12 @@ class Prefetcher: Attributes: prefetch: If False, returns the input dataset unmodified. buffer_size: Keep `buffer_size` elements loaded in the buffer. If set to -1 - (`tf.data.experimental.AUTOTUNE`), this value will be optimized + (`tf.data.AUTOTUNE`), this value will be optimized automatically to decrease latency. """ prefetch: bool = True - buffer_size: int = tf.data.experimental.AUTOTUNE + buffer_size: int = tf.data.AUTOTUNE @property def input_keys(self) -> List[Text]: diff --git a/sleap/nn/data/edge_maps.py b/sleap/nn/data/edge_maps.py index e3a9caebf..85904884b 100644 --- a/sleap/nn/data/edge_maps.py +++ b/sleap/nn/data/edge_maps.py @@ -353,7 +353,5 @@ def generate_pafs(example): return example # Map transformation. - output_ds = input_ds.map( - generate_pafs, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(generate_pafs, num_parallel_calls=tf.data.AUTOTUNE) return output_ds diff --git a/sleap/nn/data/general.py b/sleap/nn/data/general.py index 86d840ec2..bd5badfd8 100644 --- a/sleap/nn/data/general.py +++ b/sleap/nn/data/general.py @@ -39,9 +39,7 @@ def rename_keys(example): return example # Map the main processing function to each example. - output_ds = input_ds.map( - rename_keys, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(rename_keys, num_parallel_calls=tf.data.AUTOTUNE) return output_ds @@ -70,9 +68,7 @@ def filter_keys(example): return {key: example[key] for key in self.keep_keys} # Map the main processing function to each example. - output_ds = input_ds.map( - filter_keys, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(filter_keys, num_parallel_calls=tf.data.AUTOTUNE) return output_ds @@ -106,9 +102,7 @@ def move_keys(example): return example # Map the main processing function to each example. - output_ds = input_ds.map( - move_keys, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(move_keys, num_parallel_calls=tf.data.AUTOTUNE) return output_ds @@ -143,8 +137,6 @@ def transform_dataset(self, input_ds: tf.data.Dataset) -> tf.data.Dataset: """Create a dataset that contains transformed data.""" # Map the main processing function to each example. 
- output_ds = input_ds.map( - self.func, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(self.func, num_parallel_calls=tf.data.AUTOTUNE) return output_ds diff --git a/sleap/nn/data/grouping.py b/sleap/nn/data/grouping.py index 2c17b2b98..8ed3a5197 100644 --- a/sleap/nn/data/grouping.py +++ b/sleap/nn/data/grouping.py @@ -73,7 +73,7 @@ def group_examples_iter(examples): # # # Here we apply the encoder # encoded_key_ds = ds_input.map( -# encode_group_key, num_parallel_calls=tf.data.experimental.AUTOTUNE +# encode_group_key, num_parallel_calls=tf.data.AUTOTUNE # ) # # # We'll want to "reduce" matching keys by creating a list of examples diff --git a/sleap/nn/data/identity.py b/sleap/nn/data/identity.py index 7d5c57be3..70d4c1520 100644 --- a/sleap/nn/data/identity.py +++ b/sleap/nn/data/identity.py @@ -106,7 +106,7 @@ def generate_class_vectors(example): # Map transformation. output_ds = input_ds.map( - generate_class_vectors, num_parallel_calls=tf.data.experimental.AUTOTUNE + generate_class_vectors, num_parallel_calls=tf.data.AUTOTUNE ) return output_ds @@ -197,6 +197,6 @@ def generate_class_maps(example): # Map transformation. output_ds = input_ds.map( - generate_class_maps, num_parallel_calls=tf.data.experimental.AUTOTUNE + generate_class_maps, num_parallel_calls=tf.data.AUTOTUNE ) return output_ds diff --git a/sleap/nn/data/inference.py b/sleap/nn/data/inference.py index 772ac3f8b..8b4778afb 100644 --- a/sleap/nn/data/inference.py +++ b/sleap/nn/data/inference.py @@ -65,9 +65,7 @@ def predict(example): return example - output_ds = input_ds.map( - predict, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(predict, num_parallel_calls=tf.data.AUTOTUNE) return output_ds @@ -135,9 +133,7 @@ def find_peaks(example): return example - output_ds = input_ds.map( - find_peaks, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(find_peaks, num_parallel_calls=tf.data.AUTOTUNE) return output_ds @@ -194,9 +190,7 @@ def find_peaks(example): return example - output_ds = input_ds.map( - find_peaks, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(find_peaks, num_parallel_calls=tf.data.AUTOTUNE) return output_ds @@ -274,9 +268,7 @@ def find_peaks(example): return example - output_ds = input_ds.map( - find_peaks, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(find_peaks, num_parallel_calls=tf.data.AUTOTUNE) return output_ds @@ -341,8 +333,6 @@ def norm_instance(example): return example # Map the main processing function to each example. - output_ds = input_ds.map( - norm_instance, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(norm_instance, num_parallel_calls=tf.data.AUTOTUNE) return output_ds diff --git a/sleap/nn/data/instance_centroids.py b/sleap/nn/data/instance_centroids.py index bea7f5f8c..7b774ac1a 100644 --- a/sleap/nn/data/instance_centroids.py +++ b/sleap/nn/data/instance_centroids.py @@ -189,7 +189,5 @@ def find_centroids(frame_data): return frame_data # Map transformation. 
- ds_output = ds_input.map( - find_centroids, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + ds_output = ds_input.map(find_centroids, num_parallel_calls=tf.data.AUTOTUNE) return ds_output diff --git a/sleap/nn/data/instance_cropping.py b/sleap/nn/data/instance_cropping.py index 1cfd5eee7..79e223648 100644 --- a/sleap/nn/data/instance_cropping.py +++ b/sleap/nn/data/instance_cropping.py @@ -447,9 +447,7 @@ def crop_instances(frame_data): return instances_data # Map the main processing function to each example. - output_ds = input_ds.map( - crop_instances, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(crop_instances, num_parallel_calls=tf.data.AUTOTUNE) if self.unbatch: # Unbatch to split frame-level examples into individual instance-level # examples. @@ -551,9 +549,7 @@ def crop_instances(frame_data): return instances_data # Map the main processing function to each example. - output_ds = input_ds.map( - crop_instances, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(crop_instances, num_parallel_calls=tf.data.AUTOTUNE) # Unbatch to split frame-level examples into individual instance-level examples. output_ds = output_ds.unbatch() diff --git a/sleap/nn/data/normalization.py b/sleap/nn/data/normalization.py index cf0fb9cce..a20618246 100644 --- a/sleap/nn/data/normalization.py +++ b/sleap/nn/data/normalization.py @@ -376,7 +376,5 @@ def normalize(example): return example # Map transformation. - ds_output = ds_input.map( - normalize, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + ds_output = ds_input.map(normalize, num_parallel_calls=tf.data.AUTOTUNE) return ds_output diff --git a/sleap/nn/data/resizing.py b/sleap/nn/data/resizing.py index a2fcbe5de..8d2143ba3 100644 --- a/sleap/nn/data/resizing.py +++ b/sleap/nn/data/resizing.py @@ -247,9 +247,7 @@ def resize(example): return example # Map transformation. - ds_output = ds_input.map( - resize, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + ds_output = ds_input.map(resize, num_parallel_calls=tf.data.AUTOTUNE) return ds_output @@ -463,9 +461,7 @@ def resize_and_pad(example): return example - ds_output = ds_input.map( - resize_and_pad, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + ds_output = ds_input.map(resize_and_pad, num_parallel_calls=tf.data.AUTOTUNE) return ds_output @@ -510,8 +506,6 @@ def rescale_points(example): return example # Map the main processing function to each example. 
- output_ds = input_ds.map( - rescale_points, num_parallel_calls=tf.data.experimental.AUTOTUNE - ) + output_ds = input_ds.map(rescale_points, num_parallel_calls=tf.data.AUTOTUNE) return output_ds diff --git a/sleap/nn/tracking.py b/sleap/nn/tracking.py index 558aa9309..84410584c 100644 --- a/sleap/nn/tracking.py +++ b/sleap/nn/tracking.py @@ -4,7 +4,9 @@ import abc import attr import numpy as np -import cv2 +import lazy_loader + +cv2 = lazy_loader.load("cv2") import functools from typing import Callable, Deque, Dict, Iterable, List, Optional, Tuple diff --git a/sleap/nn/training.py b/sleap/nn/training.py index c3692637c..3da97b13f 100644 --- a/sleap/nn/training.py +++ b/sleap/nn/training.py @@ -268,11 +268,15 @@ def setup_optimizer(config: OptimizationConfig) -> tf.keras.optimizers.Optimizer """Set up model optimizer from config.""" if config.optimizer.lower() == "adam": # Only use amsgrad on non-M1 Mac platforms (not currently supported) - is_m1 = "arm64" in platform.platform() - use_amsgrad = not is_m1 - optimizer = tf.keras.optimizers.Adam( - learning_rate=config.initial_learning_rate, amsgrad=use_amsgrad - ) + is_apple_silicon = "arm64" in platform.platform() + if is_apple_silicon: + optimizer = tf.keras.optimizers.legacy.Adam( + learning_rate=config.initial_learning_rate + ) + else: + optimizer = tf.keras.optimizers.Adam( + learning_rate=config.initial_learning_rate, amsgrad=True + ) elif config.optimizer.lower() == "rmsprop": optimizer = tf.keras.optimizers.RMSprop( learning_rate=config.initial_learning_rate diff --git a/sleap/skeleton.py b/sleap/skeleton.py index fbd1b909c..3c3f91958 100644 --- a/sleap/skeleton.py +++ b/sleap/skeleton.py @@ -1504,7 +1504,7 @@ def to_json(self, node_to_idx: Optional[Dict[Node, int]] = None) -> str: indexed_node_graph = self._graph # Encode to JSON - graph = json_graph.node_link_data(indexed_node_graph) + graph = json_graph.node_link_data(indexed_node_graph, edges="links") # SLEAP v1.3.0 added `description` and `preview_image` to `Skeleton`, but saving # these fields breaks data format compatibility. Currently, these are only @@ -1569,7 +1569,7 @@ def from_json( """ dicts: dict = SkeletonDecoder.decode(json_str) nx_graph = dicts.get("nx_graph", dicts) - graph = json_graph.node_link_graph(nx_graph) + graph = json_graph.node_link_graph(nx_graph, edges="links") # Replace graph node indices with corresponding nodes from node_map if idx_to_node is not None: diff --git a/sleap/version.py b/sleap/version.py index 7711477cb..4a5b8da1c 100644 --- a/sleap/version.py +++ b/sleap/version.py @@ -11,7 +11,7 @@ Must be a semver string, "aN" should be appended for alpha releases. 
""" -__version__ = "1.4.1a2" +__version__ = "1.4.1a3" def versions(): diff --git a/tests/io/test_videowriter.py b/tests/io/test_videowriter.py index 35d9bc6df..fae887364 100644 --- a/tests/io/test_videowriter.py +++ b/tests/io/test_videowriter.py @@ -1,5 +1,7 @@ import os -import cv2 +import lazy_loader + +cv2 = lazy_loader.load("cv2") from pathlib import Path from sleap.io.videowriter import VideoWriter, VideoWriterOpenCV, VideoWriterImageio diff --git a/tests/io/test_visuals.py b/tests/io/test_visuals.py index a1223bfdf..0587983c1 100644 --- a/tests/io/test_visuals.py +++ b/tests/io/test_visuals.py @@ -1,7 +1,9 @@ import numpy as np import os import pytest -import cv2 +import lazy_loader + +cv2 = lazy_loader.load("cv2") from sleap.io.dataset import Labels from sleap.io.visuals import ( save_labeled_video, diff --git a/tests/test_skeleton.py b/tests/test_skeleton.py index 2320342f6..177fe1f8c 100644 --- a/tests/test_skeleton.py +++ b/tests/test_skeleton.py @@ -16,7 +16,7 @@ def test_decoded_encoded_Skeleton_from_load_json(fly_legs_skeleton_json): skeleton = Skeleton.load_json(fly_legs_skeleton_json) # Get the graph from the skeleton indexed_node_graph = skeleton._graph - graph = json_graph.node_link_data(indexed_node_graph) + graph = json_graph.node_link_data(indexed_node_graph, edges="links") # Encode the graph as a json string to test .encode method encoded_json_str = SkeletonEncoder.encode(graph) @@ -40,7 +40,7 @@ def test_decoded_encoded_Skeleton(skeleton_fixture_name, request): # Get the graph from the skeleton indexed_node_graph = skeleton._graph - graph = json_graph.node_link_data(indexed_node_graph) + graph = json_graph.node_link_data(indexed_node_graph, edges="links") # Encode the graph as a json string to test .encode method encoded_json_str = SkeletonEncoder.encode(graph)