diff --git a/MANIFEST.in b/MANIFEST.in index 6bde708a..795b4a18 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,8 +1,9 @@ include LICENSE include README.md -recursive-include crabs *.md -recursive-include crabs *.sh +recursive-include guides *.md +recursive-include bash_scripts *.sh +recursive-include opencv_notebooks *.py exclude .pre-commit-config.yaml diff --git a/crabs/pose_estimation/cluster bash scripts/slurm_inference.sh b/bash_scripts/pose_estimation/slurm_inference.sh similarity index 100% rename from crabs/pose_estimation/cluster bash scripts/slurm_inference.sh rename to bash_scripts/pose_estimation/slurm_inference.sh diff --git a/crabs/pose_estimation/cluster bash scripts/slurm_render.sh b/bash_scripts/pose_estimation/slurm_render.sh similarity index 100% rename from crabs/pose_estimation/cluster bash scripts/slurm_render.sh rename to bash_scripts/pose_estimation/slurm_render.sh diff --git a/crabs/pose_estimation/cluster bash scripts/slurm_train.sh b/bash_scripts/pose_estimation/slurm_train.sh similarity index 100% rename from crabs/pose_estimation/cluster bash scripts/slurm_train.sh rename to bash_scripts/pose_estimation/slurm_train.sh diff --git a/crabs/bboxes_labelling/cluster_bash_scripts/run_additional_channel.sh b/bash_scripts/run_additional_channel.sh similarity index 100% rename from crabs/bboxes_labelling/cluster_bash_scripts/run_additional_channel.sh rename to bash_scripts/run_additional_channel.sh diff --git a/crabs/bboxes_labelling/cluster_bash_scripts/run_clip_video.sh b/bash_scripts/run_clip_video.sh similarity index 100% rename from crabs/bboxes_labelling/cluster_bash_scripts/run_clip_video.sh rename to bash_scripts/run_clip_video.sh diff --git a/crabs/bboxes_labelling/cluster_bash_scripts/run_frame_extraction_array.sh b/bash_scripts/run_frame_extraction_array.sh similarity index 100% rename from crabs/bboxes_labelling/cluster_bash_scripts/run_frame_extraction_array.sh rename to bash_scripts/run_frame_extraction_array.sh diff --git a/crabs/bboxes_labelling/cluster_bash_scripts/run_frame_extraction_local.sh b/bash_scripts/run_frame_extraction_local.sh similarity index 100% rename from crabs/bboxes_labelling/cluster_bash_scripts/run_frame_extraction_local.sh rename to bash_scripts/run_frame_extraction_local.sh diff --git a/crabs/bboxes_labelling/annotations_utils.py b/crabs/bboxes_labelling/annotations_utils.py index 958b45d1..448b2c82 100644 --- a/crabs/bboxes_labelling/annotations_utils.py +++ b/crabs/bboxes_labelling/annotations_utils.py @@ -137,11 +137,12 @@ def combine_multiple_via_jsons( return str(json_out_fullpath) +DEFAULT_CRAB_CATEGORY = {"id": 1, "name": "crab", "supercategory": "animal"} + + def convert_via_json_to_coco( json_file_path: str, - coco_category_ID: int = 1, - coco_category_name: str = "crab", - coco_supercategory_name: str = "animal", + coco_category: dict = DEFAULT_CRAB_CATEGORY, coco_out_filename: Optional[str] = None, coco_out_dir: Optional[str] = None, ) -> str: @@ -183,18 +184,11 @@ def convert_via_json_to_coco( with open(json_file_path) as json_file: annotation_data = json.load(json_file) - # Create data structure for COCO format - coco_categories = [ - { - "id": coco_category_ID, - "name": coco_category_name, - "supercategory": coco_supercategory_name, - }, - ] + # Create data structure for COCO coco_data: dict[str, Any] = { "info": {}, "licenses": [], - "categories": coco_categories, + "categories": [coco_category], "images": [], "annotations": [], } @@ -222,7 +216,7 @@ def convert_via_json_to_coco( annotation_data = { "id": 
annotation_id, "image_id": image_id, - "category_id": coco_category_ID, + "category_id": coco_category["id"], "bbox": [x, y, width, height], "area": width * height, "iscrowd": 0, diff --git a/crabs/bboxes_labelling/combine_and_format_annotations.py b/crabs/bboxes_labelling/combine_and_format_annotations.py index 47133601..1647deb1 100644 --- a/crabs/bboxes_labelling/combine_and_format_annotations.py +++ b/crabs/bboxes_labelling/combine_and_format_annotations.py @@ -1,6 +1,6 @@ from pathlib import Path -from annotations_utils import ( +from crabs.bboxes_labelling.annotations_utils import ( combine_multiple_via_jsons, convert_via_json_to_coco, ) diff --git a/crabs/dense_optical _flow/estimate_optical_flow_on_video.py b/crabs/dense_optical _flow/estimate_optical_flow_on_video.py deleted file mode 100644 index 602020b0..00000000 --- a/crabs/dense_optical _flow/estimate_optical_flow_on_video.py +++ /dev/null @@ -1,226 +0,0 @@ -import argparse -import sys -from pathlib import Path - -import cv2 -import numpy as np -import torch - -sys.path.append("RAFT/core") - -from RAFT.core.raft import RAFT # noqa: E402 -from RAFT.core.utils import flow_viz # noqa: E402 -from RAFT.core.utils.utils import InputPadder # noqa: E402 - -# ------------------- -# Set device -# ------------------- -if torch.cuda.is_available(): - DEVICE = "cuda" -elif torch.backends.mps.is_available(): - DEVICE = "mps" -else: - DEVICE = "cpu" - -FILE_EXTENSION = "mp4" - - -# ------------------- -# aux class and fns -# ------------------- -class dotdict(dict): - """ - dot.notation access to dictionary attributes - # https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary. - """ - - __getattr__ = dict.get - __setattr__ = dict.__setitem__ # type: ignore[assignment] - __delattr__ = dict.__delitem__ # type: ignore[assignment] - - -def opencv_cap_to_torch_tensor(opencv_cap_frame): - # opencv cap returns numpy array of size (h, w, channel) - img = np.array(opencv_cap_frame).astype( - np.uint8, - ) # TODO: it is already np.array do I need to make it np.unit8? 
- img = torch.from_numpy(img).permute(2, 0, 1).float() - return img[None].to(DEVICE) - - -# ------------------- -# core fn -# ------------------- -def run_model_on_video(args): - # ------------------- - # initialise model - # ------------------- - model = torch.nn.DataParallel(RAFT(args)) - if DEVICE in ["cpu", "mps"]: - model.load_state_dict(torch.load(args.model, map_location=DEVICE)) - else: - model.load_state_dict(torch.load(args.model)) - - model = model.module - model.to(DEVICE) - model.eval() - - # for every file in the input data dir - list_input_videos = sorted( - Path(args.input_dir).glob(f"*.{FILE_EXTENSION}"), - reverse=True, - ) - for input_file in list_input_videos: - # ----------------------------------------- - # initialise video capture - # ----------------------------------------- - cap = cv2.VideoCapture(str(input_file)) - - nframes = cap.get(cv2.CAP_PROP_FRAME_COUNT) # 9000 - width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # 1920.0 - height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # 1080.0 - frame_rate = cap.get(cv2.CAP_PROP_FPS) # 25fps - - print(f"File: {input_file}") - print(f"n frames: {nframes}") - print(f"size: {(width, height)}") - print(f"frame rate: {frame_rate}") - - # ----------------------------------------- - # initialise video writer - # ----------------------------------------- - output_dir = Path(args.output_dir) - # create output dir if it doesnt exist - output_dir.mkdir(parents=True, exist_ok=True) - videowriter_path = output_dir / Path( - Path(input_file).stem + "_flow.mp4" - ) - - # initialise videowriter - videowriter = cv2.VideoWriter( - str(videowriter_path), - cv2.VideoWriter_fourcc("m", "p", "4", "v"), - frame_rate, - tuple(int(x) for x in (width, height)), - ) - - # ----------------------------------------- - # run inference in every frame - # ----------------------------------------- - # mmmm quite slow..... - with torch.no_grad(): - # ensure we start capture at 0 - if cap.get(cv2.CAP_PROP_POS_FRAMES) == 0: - frame_idx = 0 - frame_idx_stop = ( - int(nframes / args.step_frames) * args.step_frames - ) - - while frame_idx < frame_idx_stop: - # set 'index to read next' to the desired position - cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx) - print(f"focus frame: {cap.get(cv2.CAP_PROP_POS_FRAMES)}") - - # read frame f - success_frame_1, frame_1 = cap.read() - # the index to read next is now at frame 2, - # set it as the next starting position - frame_idx = cap.get(cv2.CAP_PROP_POS_FRAMES) - - # read frame at f+n - cap.set( - cv2.CAP_PROP_POS_FRAMES, frame_idx + args.step_frames - ) - success_frame_2, frame_2 = cap.read() - # OJO index to read next is now at f+n+1, but we - # will reset it to frame_idx at the start of - # the next iteration - - # if at least one of them is not successfully read, exit - if not any([success_frame_1, success_frame_2]): - print( - "At least one frame was not read correctly. Exiting ...", - ) - break - - # make them torch tensors - image1 = opencv_cap_to_torch_tensor(frame_1) - # In example: output is torch tensor of - # size([1, 3, 436, 1024]) (h x w) - image2 = opencv_cap_to_torch_tensor(frame_2) - - # pad images (why?)--------- - padder = InputPadder(image1.shape) - image1, image2 = padder.pad(image1, image2) - - # compute flow - # output has batch channel on the first dim - # TODO: what is flow_low? (downsampled?) 
- flow_low, flow_up = model( - image1, image2, iters=20, test_mode=True - ) - - # convert output to numpy array and reorder channels - # first dim = batch size (1) - # second, third, fourth = color channel, height, width - flow_uv = flow_up[0].permute(1, 2, 0).cpu().numpy() - # permute: c x h x w ---> h x w x c - - # map optical flow to rgb image pixel space - # ATT opencv uses BGR for color order! - flow_colorwheel_bgr = flow_viz.flow_to_image( - flow_uv, - convert_to_bgr=True, - # if False (default) this function would convert - # the output to RGB - ) - - # add to videowriter - # (as an opencv function, it expects BGR input) - videowriter.write(flow_colorwheel_bgr) - else: - print( - "Starting frame index different from 0," - " closing without reading...", - ) - - # ----------------------------------------- - # release capture and close video writer - # ----------------------------------------- - cap.release() - videowriter.release() - - -if __name__ == "__main__": - # ------------------------------------ - # parse command line arguments - # ------------------- - parser = argparse.ArgumentParser() - parser.add_argument("--model", help="restore checkpoint") - parser.add_argument( - "--input_dir", - help="directory with input videos for evaluation", - ) - parser.add_argument("--output_dir", help="directory to save output to") - parser.add_argument("--small", action="store_true", help="use small model") - parser.add_argument( - "--mixed_precision", - action="store_true", - help="use mixed precision", - ) - parser.add_argument( - "--alternate_corr", - action="store_true", - help="use efficent correlation implementation", - ) - parser.add_argument( - "--step_frames", - type=int, - nargs="?", - const=1, - default=1, - help="compute optical flow between these number of frames", - ) - args = parser.parse_args() - - run_model_on_video(args) diff --git a/crabs/dense_optical _flow/notebook_RAFT.py b/crabs/dense_optical _flow/notebook_RAFT.py deleted file mode 100644 index 504eb004..00000000 --- a/crabs/dense_optical _flow/notebook_RAFT.py +++ /dev/null @@ -1,316 +0,0 @@ -""" -DRAFT_crabs.ipynb. - -Automatically generated by Colaboratory. - -Original file is located at - https://colab.research.google.com/drive/1-XuJDgovJPX54qbaEnBzidV4dmMPsU1C - -# 1. Clone the RAFT repo - -# ---> run !git clone https://github.com/princeton-vl/RAFT.git - -# ---> run !pip install pathlib - -# 2. Install conda and dependencies - -See: -https://inside-machinelearning.com/en/how-to-install-use-conda-on-google-colab/ - -Only base environment: https://github.com/conda-incubator/condacolab/tree/0.1.x - -The code has been tested with PyTorch 1.6 and Cuda 10.1. - -""" - -# ---> run !pip install -q condacolab - - -# ---> run !conda --version -# If !conda --version returns no results, install conda with : -# !pip install -q condacolab - -# You can only update base virtual environment - -# ---> run !conda env list - -# ---> run !conda env update -n base -f environment.yml - -# ---> run -# !conda install pytorch=1.6.0 torchvision=0.7.0 cudatoolkit=10.1 -# matplotlib tensorboard scipy opencv -c pytorch - -################## - -# ---> run !conda list - -# !conda install python=3.7 pytorch=1.6.0 -c pytorch - -# !conda install opencv - -# !conda install tensorboard scipy matplotlib - -# !conda install cudatoolkit=10.1 - -# !conda install pytorch=1.6.0 torchvision=0.7.0 -c pytorch - -# 3. Run inference - -# Download pretrained models - - -# Commented out IPython magic to ensure Python compatibility. 
-# %%bash -# RAFT/download_models.sh - -# Run inference on sequence of frames - -# !python RAFT/demo.py --model=models/raft-kitti.pth --path=RAFT/demo-frames - -import glob -import os -import sys -from pathlib import Path - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import torch -from PIL import Image - -sys.path.append("RAFT/core") # TODO: maybe not needed? - -from RAFT.core.raft import RAFT # noqa: E402 -from RAFT.core.utils import flow_viz # noqa: E402 -from RAFT.core.utils.utils import InputPadder # noqa: E402 - -print(torch.__version__) -print(torch.cuda.is_available()) - -# utils - -DEVICE = "cuda" - - -def load_image(imfile): - img = np.array(Image.open(imfile)).astype(np.uint8) - img = torch.from_numpy(img).permute(2, 0, 1).float() - return img[None].to(DEVICE) - - -def opencv_cap_to_torch_tensor(opencv_cap_frame): - # opencv cap returns numpy array of size (h, w, channel) - img = np.array(opencv_cap_frame).astype( - np.uint8, - ) # TODO: it is already np.array do I need to make it np.unit8? - img = torch.from_numpy(img).permute(2, 0, 1).float() - return img[None].to(DEVICE) - - -def viz(img, flo, img_filename): - img = img[0].permute(1, 2, 0).cpu().numpy() # BRG ---> RGB - flo = flo[0].permute(1, 2, 0).cpu().numpy() - - # map flow to rgb image - flo = flow_viz.flow_to_image(flo) - img_flo = np.concatenate([img, flo], axis=1) - - plt.imshow(img_flo / 255.0) - plt.show() - - img_saved_bool = cv2.imwrite( - f"output/flow_{Path(img_filename).name}", - img_flo[:, :, [2, 1, 0]], - ) # RGB ---> BRG - if not img_saved_bool: - print(f"Flow image for {Path(img_filename).name} not saved") - - -def demo(args): - model = torch.nn.DataParallel(RAFT(args)) - model.load_state_dict(torch.load(args.model)) - - model = model.module - model.to(DEVICE) - model.eval() - - with torch.no_grad(): - images = glob.glob(os.path.join(args.path, "*.png")) + glob.glob( - os.path.join(args.path, "*.jpg"), - ) - - images = sorted(images) - for imfile1, imfile2 in zip(images[:-1], images[1:]): - print(imfile1) - image1 = load_image(imfile1) - print(image1.shape) - print(type(image1)) - image2 = load_image(imfile2) - - padder = InputPadder(image1.shape) - image1, image2 = padder.pad(image1, image2) - - flow_low, flow_up = model(image1, image2, iters=20, test_mode=True) - # TODO what is is flow_low: image in frame f+1 to image in frame f? - viz(image1, flow_up, imfile1) - - -# def demo_video(args): -# # initialise model - - -# # initialise video capture - -# # initialise video writer -# frame_rate, - -# with torch.no_grad(): - -# # ensure we start capture at 0 -# if cap.get(cv2.CAP_PROP_POS_FRAMES) == 0: - -# while frame_idx < nframes-1: # skip the last frame! -# # set position (does this work?)--------------------- - -# # read frame 1 -# # set next starting position as frame 1 -# # read frame 2 - -# # frame_1 is - -# # if at least one of them is not successfully read, exit -# if not any([success_frame_1, success_frame_2]): -# print( -# "At least one frame was not read correctly. Exiting ... -# ") - - -# # make them torch tensors -# # torch tensor of torch.Size([1, 3, 436, 1024]) (h x w) - -# # pad images (why?)--------- - -# # compute flow - -# # save to writer - -# # map flow to rgb image - - -# # # TODO what is is flow_low: -# # image in frame f+1 to image in frame f? 
- - -# https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary -class dotdict(dict): - """dot.notation access to dictionary attributes.""" - - __getattr__ = dict.get - __setattr__ = dict.__setitem__ # type: ignore[assignment] - __delattr__ = dict.__delitem__ # type: ignore[assignment] - - -# simulate arg parse -args = { - "model": "models/raft-kitti.pth", - "path": "RAFT/demo-frames", - "small": False, - "mixed_precision": False, - "alternate_corr": False, -} - -args = dotdict(args) - -# ---> run !mkdir output -demo(args) - -# ---> run !rm -r /content/output/ - -############# - -# simulate arg parse -args = { - "model": "models/raft-kitti.pth", - "path": "/content/NINJAV_S001_S001_T003_subclip.mp4", - "small": False, - "mixed_precision": False, - "alternate_corr": False, -} - -args = dotdict(args) - -# initialise model -model = torch.nn.DataParallel(RAFT(args)) -model.load_state_dict(torch.load(args.model)) # type: ignore[attr-defined] - -model = model.module -model.to(DEVICE) -model.eval() - -# initialise video capture -cap = cv2.VideoCapture(str(args.path)) # type: ignore[attr-defined] - -nframes = cap.get(cv2.CAP_PROP_FRAME_COUNT) # 9000 -width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # 1920.0 -height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # 1080.0 -frame_rate = cap.get(cv2.CAP_PROP_FPS) # 25fps - -# initialise video writer -videowriter = cv2.VideoWriter( - f"{Path(args.path).stem}_flow.mp4", # type: ignore[attr-defined] - cv2.VideoWriter_fourcc("m", "p", "4", "v"), - frame_rate, - tuple(int(x) for x in (width, height)), -) - -# mmmm quite slow..... -with torch.no_grad(): - # ensure we start capture at 0 - if cap.get(cv2.CAP_PROP_POS_FRAMES) == 0: - frame_idx = 0 - - while frame_idx < nframes - 1: # skip the last frame! - # set position to the desired index - cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx) - print(f"focus frame: {cap.get(cv2.CAP_PROP_POS_FRAMES)}") - - # read frame 1 - success_frame_1, frame_1 = cap.read() - # the index to read next is now at frame 2, - # set it as the next starting position - frame_idx = cap.get(cv2.CAP_PROP_POS_FRAMES) - # read frame 2 - success_frame_2, frame_2 = cap.read() - - # if at least one of them is not successfully read, exit - if not any([success_frame_1, success_frame_2]): - print("At least one frame was not read correctly. 
Exiting ...") - break - - # make them torch tensors - image1 = opencv_cap_to_torch_tensor(frame_1) - # In example: - # output is torch tensor of torch.Size([1, 3, 436, 1024]) - # (h x w) - image2 = opencv_cap_to_torch_tensor(frame_2) - - # pad images (why?)--------- - padder = InputPadder(image1.shape) - image1, image2 = padder.pad(image1, image2) - - # compute flow - # output has batch channel on the first dim - flow_low, flow_up = model(image1, image2, iters=20, test_mode=True) - - # convert output to numpy array and reorder channels - img = image1[0].permute(1, 2, 0).cpu().numpy() # c,h,w --> h,w, c - flo = flow_up[0].permute(1, 2, 0).cpu().numpy() - - # map flow to rgb image - flo = flow_viz.flow_to_image(flo) - - videowriter.write(flo[:, :, [2, 1, 0]]) # BRG ---> RGB - -cap.release() -videowriter.release() -cv2.destroyAllWindows() diff --git a/crabs/dense_optical _flow/run_RAFT_on_videos.sh b/crabs/dense_optical _flow/run_RAFT_on_videos.sh deleted file mode 100644 index 19eef508..00000000 --- a/crabs/dense_optical _flow/run_RAFT_on_videos.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -#SBATCH -p gpu # partition -#SBATCH -N 1 # number of nodes -#SBATCH --mem 12G # memory pool for all cores -#SBATCH -n 2 # number of cores -#SBATCH -t 3-00:00 # time (D-HH:MM) -#SBATCH --gres gpu:1 # request 1 GPU (of any kind) -#SBATCH -o slurm.%N.%j.out # write STDOUT -#SBATCH -e slurm.%N.%j.err # write STDERR -#SBATCH --mail-type=ALL -#SBATCH --mail-user=s.minano@ucl.ac.uk - -# --------------------- -# Load required modules -# ---------------------- -module load cuda -module load miniconda - -# ------------------- -# Clone repo -# ------------------- -SCRATCH_CRABS_DIR=/ceph/scratch/sminano/crabs_optical_flow -cd $SCRATCH_CRABS_DIR - -git clone https://github.com/princeton-vl/RAFT.git -RAFT_REPO_ROOT_DIR=$SCRATCH_CRABS_DIR/RAFT - - -# -------------------------- -# Set up conda environment -# --------------------------- -# pip install pathlib -conda create --name raft -conda activate raft -conda install pytorch torchvision cudatoolkit -c pytorch -conda install scipy - -# the following with pip because I get an error with conda -# that requires to update base conda...which is common in -# the cluster? -pip install opencv-python -pip install matplotlib -pip install tensorboard -pip install pathlib - -# ------------------- -# Run python script -# ------------------- -# Input data -# NINJAV_S001_S001_T003_subclip.mp4 -INPUT_DATA_DIR=$SCRATCH_CRABS_DIR/data/ -# output dir -OUTPUT_DIR=$SCRATCH_CRABS_DIR/output/ - -# Download models -cd $RAFT_REPO_ROOT_DIR -./download_models.sh -MODEL_PATH=$RAFT_REPO_ROOT_DIR/models/raft-kitti.pth - -# copy data and models from scratch to temp? (faster) - -# run python script -cd .. 
-STEP_FRAMES=10 -python estimate_optical_flow_on_video.py \ - --model $MODEL_PATH \ - --input_dir $INPUT_DATA_DIR \ - --output_dir $OUTPUT_DIR \ - --step_frames $STEP_FRAMES diff --git a/crabs/stereo_calibration/CalibrationDataCollection.md b/guides/CalibrationDataCollection.md similarity index 100% rename from crabs/stereo_calibration/CalibrationDataCollection.md rename to guides/CalibrationDataCollection.md diff --git a/crabs/bboxes_labelling/guides/ExtractFramesCluster.md b/guides/ExtractFramesCluster.md similarity index 100% rename from crabs/bboxes_labelling/guides/ExtractFramesCluster.md rename to guides/ExtractFramesCluster.md diff --git a/crabs/bboxes_labelling/guides/ManualLabellingSteps_dev.md b/guides/ManualLabellingSteps_dev.md similarity index 100% rename from crabs/bboxes_labelling/guides/ManualLabellingSteps_dev.md rename to guides/ManualLabellingSteps_dev.md diff --git a/crabs/bboxes_labelling/guides/ManualLabellingSteps_general.md b/guides/ManualLabellingSteps_general.md similarity index 100% rename from crabs/bboxes_labelling/guides/ManualLabellingSteps_general.md rename to guides/ManualLabellingSteps_general.md diff --git a/crabs/pose_estimation/cluster bash scripts/howto_inference_cluster.md b/guides/pose_estimation/howto_inference_cluster.md similarity index 100% rename from crabs/pose_estimation/cluster bash scripts/howto_inference_cluster.md rename to guides/pose_estimation/howto_inference_cluster.md diff --git a/crabs/pose_estimation/cluster bash scripts/howto_training_cluster.md b/guides/pose_estimation/howto_training_cluster.md similarity index 100% rename from crabs/pose_estimation/cluster bash scripts/howto_training_cluster.md rename to guides/pose_estimation/howto_training_cluster.md diff --git a/crabs/opencv_notebooks/notebook_clip_video.py b/opencv_notebooks/notebook_clip_video.py similarity index 100% rename from crabs/opencv_notebooks/notebook_clip_video.py rename to opencv_notebooks/notebook_clip_video.py diff --git a/crabs/opencv_notebooks/notebook_downsample.py b/opencv_notebooks/notebook_downsample.py similarity index 100% rename from crabs/opencv_notebooks/notebook_downsample.py rename to opencv_notebooks/notebook_downsample.py diff --git a/crabs/opencv_notebooks/notebook_set_video_at_frame.py b/opencv_notebooks/notebook_set_video_at_frame.py similarity index 100% rename from crabs/opencv_notebooks/notebook_set_video_at_frame.py rename to opencv_notebooks/notebook_set_video_at_frame.py diff --git a/crabs/opencv_notebooks/notebook_video_params.py b/opencv_notebooks/notebook_video_params.py similarity index 100% rename from crabs/opencv_notebooks/notebook_video_params.py rename to opencv_notebooks/notebook_video_params.py