From 22d6d74f93bee253416de927b9a90464444f49cd Mon Sep 17 00:00:00 2001 From: Robin Narsingh Ranabhat Date: Tue, 10 Jan 2023 18:58:08 +0545 Subject: [PATCH] Easy Finetuning and import-error addressed --- README.md | 173 +++----------------------------------- args_eval.json | 59 +++++++++++++ datasets/coco.py | 6 +- how_to.md | 56 ++++++++++++ install-driver.sh | 158 ++++++++++++++++++++++++++++++++++ main.py | 30 ++++--- models/deformable_detr.py | 4 +- util/misc.py | 86 +++++++++---------- 8 files changed, 352 insertions(+), 220 deletions(-) create mode 100644 args_eval.json create mode 100644 how_to.md create mode 100644 install-driver.sh diff --git a/README.md b/README.md index c9db563..5aa872e 100644 --- a/README.md +++ b/README.md @@ -1,169 +1,16 @@ -# Deformable DETR - -By [Xizhou Zhu](https://scholar.google.com/citations?user=02RXI00AAAAJ), [Weijie Su](https://www.weijiesu.com/), [Lewei Lu](https://www.linkedin.com/in/lewei-lu-94015977/), [Bin Li](http://staff.ustc.edu.cn/~binli/), [Xiaogang Wang](http://www.ee.cuhk.edu.hk/~xgwang/), [Jifeng Dai](https://jifengdai.org/). - -This repository is an official implementation of the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159). - - -## Introduction - -**TL; DR.** Deformable DETR is an efficient and fast-converging end-to-end object detector. It mitigates the high complexity and slow convergence issues of DETR via a novel sampling-based efficient attention mechanism. - -![deformable_detr](./figs/illustration.png) - -![deformable_detr](./figs/convergence.png) - -**Abstract.** DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we proposed Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10× less training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach. - -## License - -This project is released under the [Apache 2.0 license](./LICENSE). - -## Changelog - -See [changelog.md](./docs/changelog.md) for detailed logs of major changes. - - -## Citing Deformable DETR -If you find Deformable DETR useful in your research, please consider citing: -```bibtex -@article{zhu2020deformable, - title={Deformable DETR: Deformable Transformers for End-to-End Object Detection}, - author={Zhu, Xizhou and Su, Weijie and Lu, Lewei and Li, Bin and Wang, Xiaogang and Dai, Jifeng}, - journal={arXiv preprint arXiv:2010.04159}, - year={2020} -} -``` - -## Main Results - -| Method | Epochs | AP | APS | APM | APL | params
(M)
| FLOPs
(G)
| Total
Train
Time
(GPU
hours)
| Train
Speed
(GPU
hours
/epoch)
| Infer
Speed
(FPS)
| Batch
Infer
Speed
(FPS)
| URL | -| ----------------------------------- | :----: | :--: | :----: | :---: | :------------------------------: | :--------------------:| :----------------------------------------------------------: | :--: | :---: | :---: | ----- | ----- | -| Faster R-CNN + FPN | 109 | 42.0 | 26.6 | 45.4 | 53.4 | 42 | 180 | 380 | 3.5 | 25.6 | 28.0 | - | -| DETR | 500 | 42.0 | 20.5 | 45.8 | 61.1 | 41 | 86 | 2000 | 4.0 | 27.0 | 38.3 | - | -| DETR-DC5 | 500 | 43.3 | 22.5 | 47.3 | 61.1 | 41 |187|7000|14.0|11.4|12.4| - | -| DETR-DC5 | 50 | 35.3 | 15.2 | 37.5 | 53.6 | 41 |187|700|14.0|11.4|12.4| - | -| DETR-DC5+ | 50 | 36.2 | 16.3 | 39.2 | 53.9 | 41 |187|700|14.0|11.4|12.4| - | -| **Deformable DETR
(single scale)
** | 50 | 39.4 | 20.6 | 43.0 | 55.5 | 34 |78|160|3.2|27.0|42.4| [config](./configs/r50_deformable_detr_single_scale.sh)
[log](https://drive.google.com/file/d/1n3ZnZ-UAqmTUR4AZoM4qQntIDn6qCZx4/view?usp=sharing)
[model](https://drive.google.com/file/d/1WEjQ9_FgfI5sw5OZZ4ix-OKk-IJ_-SDU/view?usp=sharing)
| -| **Deformable DETR
(single scale, DC5)
** | 50 | 41.5 | 24.1 | 45.3 | 56.0 | 34 |128|215|4.3|22.1|29.4| [config](./configs/r50_deformable_detr_single_scale_dc5.sh)
[log](https://drive.google.com/file/d/1-UfTp2q4GIkJjsaMRIkQxa5k5vn8_n-B/view?usp=sharing)
[model](https://drive.google.com/file/d/1m_TgMjzH7D44fbA-c_jiBZ-xf-odxGdk/view?usp=sharing)
| -| **Deformable DETR** | 50 | 44.5 | 27.1 | 47.6 | 59.6 | 40 |173|325|6.5|15.0|19.4|[config](./configs/r50_deformable_detr.sh)
[log](https://drive.google.com/file/d/18YSLshFjc_erOLfFC-hHu4MX4iyz1Dqr/view?usp=sharing)
[model](https://drive.google.com/file/d/1nDWZWHuRwtwGden77NLM9JoWe-YisJnA/view?usp=sharing)
| -| **+ iterative bounding box refinement** | 50 | 46.2 | 28.3 | 49.2 | 61.5 | 41 |173|325|6.5|15.0|19.4|[config](./configs/r50_deformable_detr_plus_iterative_bbox_refinement.sh)
[log](https://drive.google.com/file/d/1DFNloITi1SFBWjYzvVEAI75ndwmGM1Uj/view?usp=sharing)
[model](https://drive.google.com/file/d/1JYKyRYzUH7uo9eVfDaVCiaIGZb5YTCuI/view?usp=sharing)
| -| **++ two-stage Deformable DETR** | 50 | 46.9 | 29.6 | 50.1 | 61.6 | 41 |173|340|6.8|14.5|18.8|[config](./configs/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage.sh)
[log](https://drive.google.com/file/d/1ozi0wbv5-Sc5TbWt1jAuXco72vEfEtbY/view?usp=sharing)
[model](https://drive.google.com/file/d/15I03A7hNTpwuLNdfuEmW9_taZMNVssEp/view?usp=sharing)
-
-*Note:*
-
-1. All models of Deformable DETR are trained with total batch size of 32.
-2. Training and inference speed are measured on NVIDIA Tesla V100 GPU.
-3. "Deformable DETR (single scale)" means only using res5 feature map (of stride 32) as input feature maps for Deformable Transformer Encoder.
-4. "DC5" means removing the stride in C5 stage of ResNet and add a dilation of 2 instead.
-5. "DETR-DC5+" indicates DETR-DC5 with some modifications, including using Focal Loss for bounding box classification and increasing number of object queries to 300.
-6. "Batch Infer Speed" refer to inference with batch size = 4 to maximize GPU utilization.
-7. The original implementation is based on our internal codebase. There are slight differences in the final accuracy and running time due to the plenty details in platform switch.
-
-
-## Installation
-
-### Requirements
-
-* Linux, CUDA>=9.2, GCC>=5.4
-
-* Python>=3.7
-
-  We recommend you to use Anaconda to create a conda environment:
-  ```bash
-  conda create -n deformable_detr python=3.7 pip
-  ```
-  Then, activate the environment:
-  ```bash
-  conda activate deformable_detr
-  ```
-
-* PyTorch>=1.5.1, torchvision>=0.6.1 (following instructions [here](https://pytorch.org/))
-
-  For example, if your CUDA version is 9.2, you could install pytorch and torchvision as following:
-  ```bash
-  conda install pytorch=1.5.1 torchvision=0.6.1 cudatoolkit=9.2 -c pytorch
-  ```
-
-* Other requirements
-  ```bash
-  pip install -r requirements.txt
-  ```
-
-### Compiling CUDA operators
-```bash
-cd ./models/ops
-sh ./make.sh
-# unit test (should see all checking is True)
-python test.py
-```
+# Deformable DETR Working Repo
+This repo makes small changes to the original Deformable-DETR repo to simplify training and finetuning, and fixes a few errors (including an import error).
 ## Usage
+1. First, go through the [original repo README](https://github.com/fundamentalvision/Deformable-DETR) for setup.
-
-### Dataset preparation
-
-Please download [COCO 2017 dataset](https://cocodataset.org/) and organize them as following:
-
-```
-code_root/
-└── data/
-    └── coco/
-        ├── train2017/
-        ├── val2017/
-        └── annotations/
-            ├── instances_train2017.json
-            └── instances_val2017.json
-```
-
-### Training
-
-#### Training on single node
-
-For example, the command for training Deformable DETR on 8 GPUs is as following:
-
-```bash
-GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 8 ./configs/r50_deformable_detr.sh
-```
-
-#### Training on multiple nodes
-
-For example, the command for training Deformable DETR on 2 nodes of each with 8 GPUs is as following:
-
-On node 1:
-
-```bash
-MASTER_ADDR=<IP address of node 1> NODE_RANK=0 GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 16 ./configs/r50_deformable_detr.sh
-```
-
-On node 2:
-
-```bash
-MASTER_ADDR=<IP address of node 1> NODE_RANK=1 GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 16 ./configs/r50_deformable_detr.sh
-```
-
-#### Training on slurm cluster
-
-If you are using slurm cluster, you can simply run the following command to train on 1 node with 8 GPUs:
-
-```bash
-GPUS_PER_NODE=8 ./tools/run_dist_slurm.sh <partition> deformable_detr 8 configs/r50_deformable_detr.sh
-```
-
-Or 2 nodes of each with 8 GPUs:
-
-```bash
-GPUS_PER_NODE=8 ./tools/run_dist_slurm.sh <partition> deformable_detr 16 configs/r50_deformable_detr.sh
-```
-#### Some tips to speed-up training
-* If your file system is slow to read images, you may consider enabling '--cache_mode' option to load whole dataset into memory at the beginning of training.
-* You may increase the batch size to maximize the GPU utilization, according to GPU memory of yours, e.g., set '--batch_size 3' or '--batch_size 4'.
-
-### Evaluation
-
-You can get the config file and pretrained model of Deformable DETR (the link is in "Main Results" session), then run following command to evaluate it on COCO 2017 validation set:
-
+NOTE:
+Do this before the `Compiling CUDA operators` step in the README above.
 ```bash
-<path to config file> --resume <path to pre-trained model> --eval
+# First check that the NVIDIA driver exists
+nvidia-smi
+# In case you are using gcloud (Debian machine) and the `nvidia-smi` command is not working, run `install-driver.sh` to freshly install the NVIDIA driver. It is not guaranteed to work on other Linux distros.
+./install-driver.sh
 ```
-
-You can also run distributed evaluation by using ```./tools/run_dist_launch.sh``` or ```./tools/run_dist_slurm.sh```.
+2. After you have set up the environment, check out [how_to.md](./how_to.md)
\ No newline at end of file
diff --git a/args_eval.json b/args_eval.json
new file mode 100644
index 0000000..bc5d6ae
--- /dev/null
+++ b/args_eval.json
@@ -0,0 +1,59 @@
+{"lr": 0.0002,
+ "lr_backbone_names": ["backbone.0"],
+ "lr_backbone": 2e-05,
+ "lr_linear_proj_names": ["reference_points", "sampling_offsets"],
+ "lr_linear_proj_mult": 0.1,
+ "batch_size": 2,
+ "weight_decay": 0.0001,
+ "epochs": 50,
+ "lr_drop": 40,
+ "lr_drop_epochs": null,
+ "clip_max_norm": 0.1,
+ "sgd": false,
+ "frozen_weights": null,
+
+ "backbone": "resnet50",
+ "dilation": false,
+ "position_embedding": "sine",
+ "position_embedding_scale": 6.283185307179586,
+ "num_feature_levels": 4,
+ "enc_layers": 6,
+ "dec_layers": 6,
+ "dim_feedforward": 1024,
+ "hidden_dim": 256,
+ "dropout": 0.1,
+ "nheads": 8,
+ "num_queries": 300,
+ "dec_n_points": 4,
+ "enc_n_points": 4,
+ "masks": false,
+ "aux_loss": true,
+ "set_cost_class": 2,
+ "set_cost_bbox": 5,
+ "set_cost_giou": 2,
+ "mask_loss_coef": 1,
+ "dice_loss_coef": 1,
+ "cls_loss_coef": 2,
+ "bbox_loss_coef": 5,
+ "giou_loss_coef": 2,
+ "focal_alpha": 0.25,
+
+ "coco_panoptic_path": null,
+ "remove_difficult": false,
+
+ "with_box_refine": true,
+ "two_stage": true,
+ "dataset_file": "coco",
+ "coco_path": "../detr_finetuning/train_accord",
+ "device": "cuda",
+ "seed": 42,
+ "resume": "./saved_models/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage-checkpoint.pth",
+ "output_dir": "./exps/iter_refine_3_class/",
+ "model_load_path": "./exps/iter_refine_3_class/model.pth",
+ "start_epoch": 0,
+
+ "eval": true,
+ "num_workers": 2,
+ "cache_mode": false,
+ "num_classes": 3
+}
\ No newline at end of file
diff --git a/datasets/coco.py b/datasets/coco.py
index 1be8308..47e2751 100644
--- a/datasets/coco.py
+++ b/datasets/coco.py
@@ -157,10 +157,10 @@ def make_coco_transforms(image_set):
 
 def build(image_set, args):
     root = Path(args.coco_path)
     assert root.exists(), f'provided COCO path {root} does not exist'
-    mode = 'instances'
+    # Each value in PATHS below is a tuple: (path to images, annotation file for those images)
     PATHS = {
-        "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'),
-        "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'),
+        "train": (root / "train/images", root / "train/images" / 'train.json'),
+        "val": (root / "valid/images", root / "valid/images" / 'valid.json'),
     }
 
     img_folder, ann_file = PATHS[image_set]
diff --git a/how_to.md b/how_to.md
new file mode 100644
index 0000000..a64d396
--- /dev/null
+++ b/how_to.md
@@ -0,0 +1,56 @@
+## 1. 
Dataset Format
+Data should be in COCO format. You might need to restructure your dataset into the layout below:
+
+- Training images: `DATA_DIR/train/images`
+- Training annotations/labels: `DATA_DIR/train/images/train.json`
+- Validation images: `DATA_DIR/valid/images`
+- Validation annotations/labels: `DATA_DIR/valid/images/valid.json`
+
+(To set up the paths differently, edit `datasets/coco.py`, inside `def build(image_set, args):`.)
+
+## 2. Training Notes
+Assuming `DATA_DIR` is `custom_files`, we can finetune from a pretrained model as:
+
+`python -u main.py --output_dir exps/iter_refine/ --with_box_refine --two_stage --resume ./saved_models/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage-checkpoint.pth --coco_path ./custom_files --num_classes=3`
+
+**Important Model Flags**:
+- `coco_path` : this will be our `DATA_DIR`.
+- `output_dir` : where model checkpoints will be saved.
+
+- `resume` : continue finetuning from the supplied checkpoint. Check the available models in the original Deformable-DETR repo. Given a large enough dataset, we could even train our own model from scratch.
+
+- `num_classes` :
+  Deformable DETR is originally trained on 91 classes. Suppose we want to finetune with 2 classes, say yes-checkbox and no-checkbox.
+
+  **Set `num_classes` to 3 (total labels + 1). The plus 1 accounts for the no-object class.**
+
+  This way, the last linear layer will output 3 logits instead of the original 91, and during model loading
+  the weights of the last linear layer will be discarded.
+
+
+## 3. Inference Notes
+To run inference with the trained model and visualize results, check the notebook `inference.ipynb`.
+
+## 4. For Gcloud users [Extra]
+On gcloud, using `jupyter-notebook` or `jupyter-lab` is convenient.
+To set up jupyter-lab, these are the steps:
+```bash
+## change instance name, region, project name etc. as necessary
+gcloud beta compute ssh --zone "region_name" "instance_name" --project "project_name" -- -L 8888:localhost:8888
+
+# inside the remote server
+conda activate your_detr_environment
+conda install notebook
+conda install jupyterlab
+conda install ipykernel
+python -m ipykernel install --user --name=name_of_kernel
+# finally open the jupyter lab server
+jupyter lab --no-browser --port=8888 --allow-root
+# Now, click on the link provided in the standard output below this line:
+# To access the server, open this file in a browser: ...
+```
diff --git a/install-driver.sh b/install-driver.sh
new file mode 100644
index 0000000..7ea2d42
--- /dev/null
+++ b/install-driver.sh
@@ -0,0 +1,158 @@
+#!/bin/bash
+#
+# Copyright 2020 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Purpose: This script installs NVIDIA drivers for the GPU.
+#
+# Refer to the following links for NVIDIA driver installation.
+# https://developer.nvidia.com/cuda-toolkit-archive +# https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/" +# https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html + +export ENV_FILE="/etc/profile.d/env.sh" +# shellcheck source=/etc/profile.d/env.sh disable=SC1091 +source "${ENV_FILE}" || exit 1 + + +function get_metadata_value() { + curl --retry 5 \ + -s \ + -f \ + -H "Metadata-Flavor: Google" \ + "http://metadata/computeMetadata/v1/$1" +} + +function get_attribute_value() { + get_metadata_value "instance/attributes/$1" +} + +function install_linux_headers() { + # Install linux headers. Note that the kernel version might be changed after + # installing gvnic version. For example: 4.19.0-8-cloud-amd64 -> + # 4.19.0-9-cloud-amd64. So we install the kernel headers for each driver + # installation. + echo "install linux headers: linux-headers-$(uname -r)" + sudo apt install -y linux-headers-"$(uname -r)" || exit 1 +} + +# Try to download driver via Web if GCS failed (Example: VPC-SC/GCS failure) +function download_driver_via_http() { + local driver_url_path=$1 + local downloaded_file=$2 + echo "Could not use Google Cloud Storage APIs to download driver. Attempting to download them directly from Nvidia." + echo "Downloading driver from URL: ${driver_url_path}" + wget -nv "${driver_url_path}" -O "${downloaded_file}" || { + echo 'Download driver via Web failed!' && + rm -f "${downloaded_file}" && + echo "${downloaded_file} deleted" + } +} + +# For Debian-like OS +function install_driver_debian() { + echo "DRIVER_VERSION: ${DRIVER_VERSION}" + local driver_installer_file_name="driver_installer.run" + local nvidia_driver_file_name="NVIDIA-Linux-x86_64-${DRIVER_VERSION}.run" + if [[ -z "${DRIVER_GCS_PATH}" ]]; then + DRIVER_GCS_PATH="gs://nvidia-drivers-us-public/tesla/${DRIVER_VERSION}" + fi + local driver_gcs_file_path=${DRIVER_GCS_PATH}/${nvidia_driver_file_name} + echo "Downloading driver from GCS location and install: ${driver_gcs_file_path}" + set +e + gsutil -q cp "${driver_gcs_file_path}" "${driver_installer_file_name}" + set -e + # Download driver via http if GCS failed. + if [[ ! -f "${driver_installer_file_name}" ]]; then + driver_url_path="http://us.download.nvidia.com/tesla/${DRIVER_VERSION}/${nvidia_driver_file_name}" + download_driver_via_http "${driver_url_path}" "${driver_installer_file_name}" + fi + + if [[ ! -f "${driver_installer_file_name}" ]]; then + echo "Failed to find drivers!" + exit 1 + fi + + chmod +x ${driver_installer_file_name} + sudo ./${driver_installer_file_name} --dkms -a -s --no-drm --install-libglvnd + rm -rf ${driver_installer_file_name} +} + +# For Ubuntu OS +function install_driver_ubuntu() { + echo "DRIVER_UBUNTU_DEB: ${DRIVER_UBUNTU_DEB}" + echo "DRIVER_UBUNTU_PKG: ${DRIVER_UBUNTU_PKG}" + if [[ -z "${DRIVER_GCS_PATH}" ]]; then + DRIVER_GCS_PATH="gs://dl-platform-public-nvidia/${DRIVER_UBUNTU_DEB}" + fi + echo "Downloading driver from GCS location and install: ${DRIVER_GCS_PATH}" + set +e + gsutil -q cp "${DRIVER_GCS_PATH}" "${DRIVER_UBUNTU_DEB}" + set -e + # Download driver via http if GCS failed. + if [[ ! -f "${DRIVER_UBUNTU_DEB}" ]]; then + driver_url_path="https://developer.download.nvidia.com/compute/cuda/${DRIVER_UBUNTU_CUDA_VERSION}/local_installers/${DRIVER_UBUNTU_DEB}" + download_driver_via_http "${driver_url_path}" "${DRIVER_UBUNTU_DEB}" + fi + if [[ ! 
-f "${DRIVER_UBUNTU_DEB}" ]]; then + driver_url_path="https://us.download.nvidia.com/tesla/${DRIVER_VERSION}/${DRIVER_UBUNTU_DEB}" + download_driver_via_http "${driver_url_path}" "${DRIVER_UBUNTU_DEB}" + fi + if [[ ! -f "${DRIVER_UBUNTU_DEB}" ]]; then + echo "Failed to find drivers!" + exit 1 + fi + wget -nv https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin + + sudo mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600 + dpkg -i "${DRIVER_UBUNTU_DEB}" || { + echo "Failed to install ${DRIVER_UBUNTU_DEB}..exit" + exit 1 + } + apt-key add /var/cuda-repo-*/*.pub || apt-key add /var/nvidia-driver*/*.pub || { + echo "Failed to add apt-key...exit" + exit 1 + } + sudo apt-get --allow-releaseinfo-change update + sudo apt remove -y "${DRIVER_UBUNTU_PKG}" + sudo apt -y autoremove && sudo apt install -y "${DRIVER_UBUNTU_PKG}" nvidia-modprobe + rm -rf "${DRIVER_UBUNTU_DEB}" cuda-update1804.pin +} + +function wait_apt_locks_released() { + # Wait for apt lock to be released + # Source: https://askubuntu.com/a/373478 + echo "wait apt locks released" + while sudo fuser /var/{lib/{dpkg,apt/lists},cache/apt/archives}/lock >/dev/null 2>&1 || + sudo fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1 ; do + sleep 1 + done +} + +main() { + wait_apt_locks_released + install_linux_headers + # shellcheck source=/opt/deeplearning/driver-version.sh disable=SC1091 + source "${DL_PATH}/driver-version.sh" + export DRIVER_GCS_PATH + # Custom GCS driver location via instance metadata. + DRIVER_GCS_PATH=$(get_attribute_value nvidia-driver-gcs-path) + if [[ "${OS_IMAGE_FAMILY}" == "${OS_DEBIAN9}" || "${OS_IMAGE_FAMILY}" == "${OS_DEBIAN10}" ]]; then + install_driver_debian + elif [[ "${OS_IMAGE_FAMILY}" == "${OS_UBUNTU1804}" || "${OS_IMAGE_FAMILY}" == "${OS_UBUNTU2004}" ]]; then + install_driver_ubuntu + fi + exit 0 +} + +main diff --git a/main.py b/main.py index fc6ccfa..75d01d8 100644 --- a/main.py +++ b/main.py @@ -122,6 +122,7 @@ def get_args_parser(): parser.add_argument('--eval', action='store_true') parser.add_argument('--num_workers', default=2, type=int) parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory') + parser.add_argument('--num_classes', default=91, type=int) # By default, Model was trained on 91 classes return parser @@ -129,7 +130,6 @@ def get_args_parser(): def main(args): utils.init_distributed_mode(args) print("git:\n {}\n".format(utils.get_sha())) - if args.frozen_weights is not None: assert args.masks, "Frozen training is meant for segmentation only" print(args) @@ -225,26 +225,38 @@ def match_name_keywords(n, name_keywords): model_without_ddp.detr.load_state_dict(checkpoint['model']) output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) if args.resume: if args.resume.startswith('https'): checkpoint = torch.hub.load_state_dict_from_url( args.resume, map_location='cpu', check_hash=True) else: + ## LOAD WEIGHTS INTO MODEL checkpoint = torch.load(args.resume, map_location='cpu') + # When number of classes changes, modify the model as well. Otherwise, keep original weights ! 
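+            # The 'class_embed' weight matrices are sized by num_classes, so a head
+            # trained for a different class count cannot be loaded into the resized
+            # model. Deleting those keys lets the load_state_dict(strict=False) call
+            # below restore every other weight while the new classification head
+            # stays randomly initialized.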
+ if args.num_classes != 91 or args.dataset_file != 'coco': + print(f"Deleting last linear layer weights as num_classes is different {args.num_classes} than expected for coco (91)") + keys = list(checkpoint['model'].keys()) + for i in keys: + if 'class_embed' in i: + del checkpoint["model"][i] + else: + print("Keeping all the original weights.") missing_keys, unexpected_keys = model_without_ddp.load_state_dict(checkpoint['model'], strict=False) unexpected_keys = [k for k in unexpected_keys if not (k.endswith('total_params') or k.endswith('total_ops'))] if len(missing_keys) > 0: print('Missing Keys: {}'.format(missing_keys)) if len(unexpected_keys) > 0: print('Unexpected Keys: {}'.format(unexpected_keys)) + # import pdb; pdb.set_trace() if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint: import copy - p_groups = copy.deepcopy(optimizer.param_groups) - optimizer.load_state_dict(checkpoint['optimizer']) - for pg, pg_old in zip(optimizer.param_groups, p_groups): - pg['lr'] = pg_old['lr'] - pg['initial_lr'] = pg_old['initial_lr'] - print(optimizer.param_groups) + # p_groups = copy.deepcopy(optimizer.param_groups) + # optimizer.load_state_dict(checkpoint['optimizer']) + # for pg, pg_old in zip(optimizer.param_groups, p_groups): + # pg['lr'] = pg_old['lr'] + # pg['initial_lr'] = pg_old['initial_lr'] + # print(optimizer.param_groups) lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) # todo: this is a hack for doing experiment that resume from checkpoint and also modify lr scheduler (e.g., decrease lr in advance). args.override_resumed_lr_drop = True @@ -269,7 +281,7 @@ def match_name_keywords(n, name_keywords): print("Start training") start_time = time.time() - for epoch in range(args.start_epoch, args.epochs): + for epoch in range(args.start_epoch, args.epochs + args.start_epoch): if args.distributed: sampler_train.set_epoch(epoch) train_stats = train_one_epoch( @@ -321,6 +333,4 @@ def match_name_keywords(n, name_keywords): if __name__ == '__main__': parser = argparse.ArgumentParser('Deformable DETR training and evaluation script', parents=[get_args_parser()]) args = parser.parse_args() - if args.output_dir: - Path(args.output_dir).mkdir(parents=True, exist_ok=True) main(args) diff --git a/models/deformable_detr.py b/models/deformable_detr.py index f1415e8..7c7e026 100644 --- a/models/deformable_detr.py +++ b/models/deformable_detr.py @@ -442,7 +442,9 @@ def forward(self, x): def build(args): - num_classes = 20 if args.dataset_file != 'coco' else 91 + # coco has 91 classes originally. 
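+    # The head size now comes from the --num_classes flag (default 91, the COCO
+    # setting), so a finetuning dataset can size the classification layer directly.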
+ num_classes = args.num_classes if args.num_classes != 91 else 91 + print("Building final layer with classes : ", num_classes) if args.dataset_file == "coco_panoptic": num_classes = 250 device = torch.device(args.device) diff --git a/util/misc.py b/util/misc.py index 6d4d076..4f39917 100644 --- a/util/misc.py +++ b/util/misc.py @@ -27,36 +27,36 @@ # needed due to empty tensor bug in pytorch and torchvision 0.5 import torchvision -if float(torchvision.__version__[:3]) < 0.5: - import math - from torchvision.ops.misc import _NewEmptyTensorOp - def _check_size_scale_factor(dim, size, scale_factor): - # type: (int, Optional[List[int]], Optional[float]) -> None - if size is None and scale_factor is None: - raise ValueError("either size or scale_factor should be defined") - if size is not None and scale_factor is not None: - raise ValueError("only one of size or scale_factor should be defined") - if not (scale_factor is not None and len(scale_factor) != dim): - raise ValueError( - "scale_factor shape must match input shape. " - "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor)) - ) - def _output_size(dim, input, size, scale_factor): - # type: (int, Tensor, Optional[List[int]], Optional[float]) -> List[int] - assert dim == 2 - _check_size_scale_factor(dim, size, scale_factor) - if size is not None: - return size - # if dim is not 2 or scale_factor is iterable use _ntuple instead of concat - assert scale_factor is not None and isinstance(scale_factor, (int, float)) - scale_factors = [scale_factor, scale_factor] - # math.floor might return float in py2.7 - return [ - int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim) - ] -elif float(torchvision.__version__[:3]) < 0.7: - from torchvision.ops import _new_empty_tensor - from torchvision.ops.misc import _output_size +# if float(torchvision.__version__[:3]) < 0.5: +# import math +# from torchvision.ops.misc import _NewEmptyTensorOp +# def _check_size_scale_factor(dim, size, scale_factor): +# # type: (int, Optional[List[int]], Optional[float]) -> None +# if size is None and scale_factor is None: +# raise ValueError("either size or scale_factor should be defined") +# if size is not None and scale_factor is not None: +# raise ValueError("only one of size or scale_factor should be defined") +# if not (scale_factor is not None and len(scale_factor) != dim): +# raise ValueError( +# "scale_factor shape must match input shape. " +# "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor)) +# ) +# def _output_size(dim, input, size, scale_factor): +# # type: (int, Tensor, Optional[List[int]], Optional[float]) -> List[int] +# assert dim == 2 +# _check_size_scale_factor(dim, size, scale_factor) +# if size is not None: +# return size +# # if dim is not 2 or scale_factor is iterable use _ntuple instead of concat +# assert scale_factor is not None and isinstance(scale_factor, (int, float)) +# scale_factors = [scale_factor, scale_factor] +# # math.floor might return float in py2.7 +# return [ +# int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim) +# ] +# elif float(torchvision.__version__[:3]) < 0.7: +# from torchvision.ops import _new_empty_tensor +# from torchvision.ops.misc import _output_size class SmoothedValue(object): @@ -487,19 +487,19 @@ def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corne This will eventually be supported natively by PyTorch, and this class can go away. 
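+    With the torchvision < 0.7 compatibility branches commented out, this now
+    always dispatches to torchvision.ops.misc.interpolate.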
""" - if float(torchvision.__version__[:3]) < 0.7: - if input.numel() > 0: - return torch.nn.functional.interpolate( - input, size, scale_factor, mode, align_corners - ) - - output_shape = _output_size(2, input, size, scale_factor) - output_shape = list(input.shape[:-2]) + list(output_shape) - if float(torchvision.__version__[:3]) < 0.5: - return _NewEmptyTensorOp.apply(input, output_shape) - return _new_empty_tensor(input, output_shape) - else: - return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) +# if float(torchvision.__version__[:3]) < 0.7: +# if input.numel() > 0: +# return torch.nn.functional.interpolate( +# input, size, scale_factor, mode, align_corners +# ) + +# output_shape = _output_size(2, input, size, scale_factor) +# output_shape = list(input.shape[:-2]) + list(output_shape) +# if float(torchvision.__version__[:3]) < 0.5: +# return _NewEmptyTensorOp.apply(input, output_shape) +# return _new_empty_tensor(input, output_shape) +# else: + return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) def get_total_grad_norm(parameters, norm_type=2):