From dd034bb6b9ad56ec5508d7b4edd442d977fcc51e Mon Sep 17 00:00:00 2001 From: ismellpillows <54971111+ismellpillows@users.noreply.github.com> Date: Wed, 19 Jul 2023 23:27:48 -0700 Subject: [PATCH 01/24] [Fix] Fix mismatch when number of people changes (#2583) --- demo/demo_skeleton.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/demo/demo_skeleton.py b/demo/demo_skeleton.py index b7a6173216..30d05ed917 100644 --- a/demo/demo_skeleton.py +++ b/demo/demo_skeleton.py @@ -164,8 +164,9 @@ def main(): keypoint_score = np.zeros((num_frame, num_person, num_keypoint), dtype=np.float16) for i, poses in enumerate(pose_results): - keypoint[i] = poses['keypoints'] - keypoint_score[i] = poses['keypoint_scores'] + num_current_person = len(poses['keypoints']) + keypoint[i, :num_current_person] = poses['keypoints'] + keypoint_score[i, :num_current_person] = poses['keypoint_scores'] fake_anno['keypoint'] = keypoint.transpose((1, 0, 2, 3)) fake_anno['keypoint_score'] = keypoint_score.transpose((1, 0, 2)) From c5487888f466cd7a8c8964538dad685e26a8b4e8 Mon Sep 17 00:00:00 2001 From: wxDai Date: Thu, 20 Jul 2023 14:31:44 +0800 Subject: [PATCH 02/24] [Update] Update audio-based model (#2570) --- configs/_base_/models/tsn_r18_audio.py | 11 -- configs/recognition_audio/resnet/README.md | 17 +- configs/recognition_audio/resnet/metafile.yml | 12 +- ...0-64x1x1-100e_kinetics400-audio-feature.py | 64 ++++--- ...18_8xb320-64x1x1-100e_kinetics400-audio.py | 100 ---------- mmaction/datasets/audio_dataset.py | 47 ++--- mmaction/datasets/transforms/__init__.py | 46 +++-- mmaction/datasets/transforms/formatting.py | 17 +- mmaction/datasets/transforms/loading.py | 175 ++++-------------- mmaction/datasets/transforms/processing.py | 111 ----------- tests/datasets/transforms/test_loading.py | 39 +--- tests/datasets/transforms/test_processing.py | 62 +------ tools/data/build_audio_features.py | 21 ++- 13 files changed, 145 insertions(+), 577 deletions(-) delete mode 100644 configs/_base_/models/tsn_r18_audio.py delete mode 100644 configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio.py diff --git a/configs/_base_/models/tsn_r18_audio.py b/configs/_base_/models/tsn_r18_audio.py deleted file mode 100644 index be21b44c0b..0000000000 --- a/configs/_base_/models/tsn_r18_audio.py +++ /dev/null @@ -1,11 +0,0 @@ -# model settings -model = dict( - type='RecognizerAudio', - backbone=dict(type='ResNet', depth=18, in_channels=1, norm_eval=False), - cls_head=dict( - type='TSNAudioHead', - num_classes=400, - in_channels=512, - dropout_ratio=0.5, - init_std=0.01, - average_clips='prob')) diff --git a/configs/recognition_audio/resnet/README.md b/configs/recognition_audio/resnet/README.md index f6386e313f..3a58b201c7 100644 --- a/configs/recognition_audio/resnet/README.md +++ b/configs/recognition_audio/resnet/README.md @@ -8,7 +8,7 @@ -We present Audiovisual SlowFast Networks, an architecture for integrated audiovisual perception. AVSlowFast has Slow and Fast visual pathways that are deeply inte- grated with a Faster Audio pathway to model vision and sound in a unified representation. We fuse audio and vi- sual features at multiple layers, enabling audio to con- tribute to the formation of hierarchical audiovisual con- cepts. To overcome training difficulties that arise from dif- ferent learning dynamics for audio and visual modalities, we introduce DropPathway, which randomly drops the Au- dio pathway during training as an effective regularization technique. 
Inspired by prior studies in neuroscience, we perform hierarchical audiovisual synchronization to learn joint audiovisual features. We report state-of-the-art results on six video action classification and detection datasets, perform detailed ablation studies, and show the gener- alization of AVSlowFast to learn self-supervised audiovi- sual features. Code will be made available at: https: //github.com/facebookresearch/SlowFast. +We present Audiovisual SlowFast Networks, an architecture for integrated audiovisual perception. AVSlowFast has Slow and Fast visual pathways that are deeply integrated with a Faster Audio pathway to model vision and sound in a unified representation. We fuse audio and visual features at multiple layers, enabling audio to contribute to the formation of hierarchical audiovisual concepts. To overcome training difficulties that arise from different learning dynamics for audio and visual modalities, we introduce DropPathway, which randomly drops the Au- dio pathway during training as an effective regularization technique. Inspired by prior studies in neuroscience, we perform hierarchical audiovisual synchronization to learn joint audiovisual features. We report state-of-the-art results on six video action classification and detection datasets, perform detailed ablation studies, and show the generalization of AVSlowFast to learn self-supervised audiovisual features. @@ -20,16 +20,9 @@ We present Audiovisual SlowFast Networks, an architecture for integrated audiovi ### Kinetics-400 -| frame sampling strategy | n_fft | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :---: | :--: | :------: | :------: | :------: | :------: | :--------------: | :--------: | :------------------------------------: | :----------------------------------: | :----------------------------------: | -| 64x1x1 | 1024 | 8 | Resnet18 | None | 19.7 | 35.75 | 10 clips | 1897 | [config](/configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature_20201012-bf34df6c.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.log) | - -1. The **gpus** indicates the number of gpus we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. -2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. 
- -For more details on data preparation, you can refer to `Prepare audio` in [Data Preparation Tutorial](/docs/en/user_guides/prepare_dataset.md). +| frame sampling strategy | n_fft | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :---: | :--: | :------: | :------: | :------: | :------: | :--------------: | :---: | :----: | :------------------------------------: | :----------------------------------: | :---------------------------------: | +| 64x1x1 | 1024 | 8 | Resnet18 | None | 13.7 | 27.3 | 1 clips | 0.37G | 11.4M | [config](/configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature_20230702-e4642fb0.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.log) | ## Train @@ -43,7 +36,7 @@ Example: train ResNet model on Kinetics-400 audio dataset in a deterministic opt ```shell python tools/train.py configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed 0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/train_test.md). diff --git a/configs/recognition_audio/resnet/metafile.yml b/configs/recognition_audio/resnet/metafile.yml index f82d234e9a..26f495cd9e 100644 --- a/configs/recognition_audio/resnet/metafile.yml +++ b/configs/recognition_audio/resnet/metafile.yml @@ -11,16 +11,20 @@ Models: In Collection: Audio Metadata: Architecture: ResNet18 + Batch Size: 320 + Epochs: 100 + FLOPs: 0.37G + Parameters: 11.4M Pretrained: None + n_fft: 1024 Training Data: Kinetics-400 Training Resources: 8 GPUs - n_fft: 1024 Modality: Audio Results: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 19.7 - Top 5 Accuracy: 35.75 + Top 1 Accuracy: 13.7 + Top 5 Accuracy: 27.3 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.log - Weights: https://download.openmmlab.com/mmaction/v1.0/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature_20201012-bf34df6c.pth + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature_20230702-e4642fb0.pth diff --git a/configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.py b/configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.py index 8a37ab5bad..9b00c34796 100644 --- a/configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.py +++ b/configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.py @@ -1,14 +1,24 @@ -_base_ = [ - '../../_base_/models/tsn_r18_audio.py', '../../_base_/default_runtime.py' -] +_base_ = '../../_base_/default_runtime.py' + +# model settings +model = dict( + type='RecognizerAudio', + backbone=dict(type='ResNet', depth=18, in_channels=1, 
norm_eval=False), + cls_head=dict( + type='TSNAudioHead', + num_classes=400, + in_channels=512, + dropout_ratio=0.5, + init_std=0.01, + average_clips='prob')) # dataset settings dataset_type = 'AudioDataset' -data_root = 'data/kinetics400/audio_features_train' -data_root_val = 'data/kinetics400/audio_features_val' -ann_file_train = 'data/kinetics400/kinetics400_val_list_audio_features.txt' -ann_file_val = 'data/kinetics400/kinetics400_val_list_audio_features.txt' -ann_file_test = 'data/kinetics400/kinetics400_val_list_audio_features.txt' +data_root = 'data/kinetics400' +ann_file_train = 'kinetics400_train_list_audio_features.txt' +ann_file_val = 'kinetics400_val_list_audio_features.txt' +ann_file_test = 'kinetics400_val_list_audio_features.txt' + train_pipeline = [ dict(type='LoadAudioFeature'), dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1), @@ -28,53 +38,42 @@ dict(type='FormatAudioShape', input_format='NCTF'), dict(type='PackActionInputs') ] -test_pipeline = [ - dict(type='LoadAudioFeature'), - dict( - type='SampleFrames', - clip_len=64, - frame_interval=1, - num_clips=10, - test_mode=True), - dict(type='AudioFeatureSelector'), - dict(type='FormatAudioShape', input_format='NCTF'), - dict(type='PackActionInputs') -] +test_pipeline = val_pipeline train_dataloader = dict( batch_size=320, - num_workers=2, + num_workers=4, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=True), dataset=dict( type=dataset_type, ann_file=ann_file_train, - data_prefix=dict(audio=data_root_val), - suffix='.npy', - pipeline=train_pipeline)) + pipeline=train_pipeline, + data_root=data_root, + data_prefix=dict(audio='audio_features_train'))) val_dataloader = dict( batch_size=320, - num_workers=2, + num_workers=4, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, ann_file=ann_file_val, pipeline=val_pipeline, - data_prefix=dict(audio=data_root_val), - suffix='.npy', + data_root=data_root, + data_prefix=dict(audio='audio_features_val'), test_mode=True)) test_dataloader = dict( batch_size=1, - num_workers=2, + num_workers=4, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, ann_file=ann_file_test, pipeline=test_pipeline, - data_prefix=dict(audio=data_root_val), - suffix='.npy', + data_root=data_root, + data_prefix=dict(audio='audio_features_val'), test_mode=True)) val_evaluator = dict(type='AccMetric') @@ -90,8 +89,7 @@ ] optim_wrapper = dict( - optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001), + optimizer=dict(type='SGD', lr=0.2, momentum=0.9, weight_decay=0.0001), clip_grad=dict(max_norm=40, norm_type=2)) -default_hooks = dict( - checkpoint=dict(max_keep_ckpts=3, interval=5), logger=dict(interval=20)) +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3, interval=5)) diff --git a/configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio.py b/configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio.py deleted file mode 100644 index ccae1b251f..0000000000 --- a/configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio.py +++ /dev/null @@ -1,100 +0,0 @@ -_base_ = [ - '../../_base_/models/tsn_r18_audio.py', '../../_base_/default_runtime.py' -] - -# dataset settings -dataset_type = 'AudioDataset' -data_root = 'data/kinetics400/audios_train' -data_root_val = 'data/kinetics400/audios_val' -ann_file_train = 'data/kinetics400/kinetics400_train_list_audios.txt' -ann_file_val = 
'data/kinetics400/kinetics400_val_list_audios.txt' -ann_file_test = 'data/kinetics400/kinetics400_val_list_audios.txt' -train_pipeline = [ - dict(type='AudioDecodeInit'), - dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1), - dict(type='AudioDecode'), - dict(type='AudioAmplify', ratio=1.5), - dict(type='MelSpectrogram'), - dict(type='FormatAudioShape', input_format='NCTF'), - dict(type='PackActionInputs') -] -val_pipeline = [ - dict(type='AudioDecodeInit'), - dict( - type='SampleFrames', - clip_len=64, - frame_interval=1, - num_clips=1, - test_mode=True), - dict(type='AudioDecode'), - dict(type='AudioAmplify', ratio=1.5), - dict(type='MelSpectrogram'), - dict(type='FormatAudioShape', input_format='NCTF'), - dict(type='PackActionInputs') -] -test_pipeline = [ - dict(type='AudioDecodeInit'), - dict( - type='SampleFrames', - clip_len=64, - frame_interval=1, - num_clips=10, - test_mode=True), - dict(type='AudioDecode'), - dict(type='AudioAmplify', ratio=1.5), - dict(type='MelSpectrogram'), - dict(type='FormatAudioShape', input_format='NCTF'), - dict(type='PackActionInputs') -] - -train_dataloader = dict( - batch_size=320, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - ann_file=ann_file_train, - data_prefix=dict(audio=data_root), - pipeline=train_pipeline)) -val_dataloader = dict( - batch_size=320, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=val_pipeline, - data_prefix=dict(audio=data_root_val), - test_mode=True)) -test_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_test, - pipeline=test_pipeline, - data_prefix=dict(audio=data_root_val), - test_mode=True)) - -val_evaluator = dict(type='AccMetric') -test_evaluator = val_evaluator - -train_cfg = dict( - type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=5) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') - -param_scheduler = [ - dict(type='CosineAnnealingLR', eta_min=0, T_max=100, by_epoch=True) -] - -optim_wrapper = dict( - optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001), - clip_grad=dict(max_norm=40, norm_type=2)) - -default_hooks = dict( - checkpoint=dict(max_keep_ckpts=3, interval=5), logger=dict(interval=20)) diff --git a/mmaction/datasets/audio_dataset.py b/mmaction/datasets/audio_dataset.py index 42c98fb091..07aae25143 100644 --- a/mmaction/datasets/audio_dataset.py +++ b/mmaction/datasets/audio_dataset.py @@ -1,27 +1,21 @@ # Copyright (c) OpenMMLab. All rights reserved. import os.path as osp -from typing import Callable, List, Optional, Union +from typing import Callable, Dict, List, Optional, Union -import torch from mmengine.utils import check_file_exist from mmaction.registry import DATASETS -from mmaction.utils import ConfigType from .base import BaseActionDataset @DATASETS.register_module() class AudioDataset(BaseActionDataset): - """Audio dataset for action recognition. Annotation file can be that of the - rawframe dataset, or: + """Audio dataset for action recognition. - .. 
code-block:: txt - some/directory-1.wav 163 1 - some/directory-2.wav 122 1 - some/directory-3.wav 258 2 - some/directory-4.wav 234 2 - some/directory-5.wav 295 3 - some/directory-6.wav 121 3 + The ann_file is a text file with multiple lines, and each line indicates + a sample audio or extracted audio feature with the filepath, total frames + of the raw video and label, which are split with a whitespace. + Example of a annotation file: .. code-block:: txt some/directory-1.npy 163 1 @@ -33,26 +27,22 @@ class AudioDataset(BaseActionDataset): Args: ann_file (str): Path to the annotation file. - pipeline (List[Union[dict, ConfigDict, Callable]]): A sequence of - data transforms. - data_prefix (dict or ConfigDict, optional): Path to a directory where + pipeline (list[dict | callable]): A sequence of data transforms. + data_prefix (dict): Path to a directory where audios are held. Defaults to ``dict(audio='')``. multi_class (bool): Determines whether it is a multi-class recognition dataset. Defaults to False. num_classes (int, optional): Number of classes in the dataset. Defaults to None. - suffix (str): The suffix of the audio file. Defaults to ``.wav``. """ def __init__(self, ann_file: str, - pipeline: List[Union[ConfigType, Callable]], - data_prefix: ConfigType = dict(audio=''), + pipeline: List[Union[Dict, Callable]], + data_prefix: Dict = dict(audio=''), multi_class: bool = False, num_classes: Optional[int] = None, - suffix: str = '.wav', **kwargs) -> None: - self.suffix = suffix super().__init__( ann_file, pipeline, @@ -62,8 +52,8 @@ def __init__(self, modality='Audio', **kwargs) - def load_data_list(self) -> List[dict]: - """Load annotation file to get video information.""" + def load_data_list(self) -> List[Dict]: + """Load annotation file to get audio information.""" check_file_exist(self.ann_file) data_list = [] with open(self.ann_file, 'r') as fin: @@ -73,25 +63,18 @@ def load_data_list(self) -> List[dict]: idx = 0 filename = line_split[idx] if self.data_prefix['audio'] is not None: - if not filename.endswith(self.suffix): - filename = osp.join(self.data_prefix['audio'], - filename + self.suffix) - else: - filename = osp.join(self.data_prefix['audio'], - filename) + filename = osp.join(self.data_prefix['audio'], filename) video_info['audio_path'] = filename idx += 1 # idx for total_frames video_info['total_frames'] = int(line_split[idx]) idx += 1 - # idx for label[s] + # idx for label label = [int(x) for x in line_split[idx:]] assert label, f'missing label in line: {line}' if self.multi_class: assert self.num_classes is not None - onehot = torch.zeros(self.num_classes) - onehot[label] = 1.0 - video_info['label'] = onehot + video_info['label'] = label else: assert len(label) == 1 video_info['label'] = label[0] diff --git a/mmaction/datasets/transforms/__init__.py b/mmaction/datasets/transforms/__init__.py index d8b8cc4eb3..f2670cd929 100644 --- a/mmaction/datasets/transforms/__init__.py +++ b/mmaction/datasets/transforms/__init__.py @@ -1,9 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. 
from .formatting import (FormatAudioShape, FormatGCNInput, FormatShape, PackActionInputs, PackLocalizationInputs, Transpose) -from .loading import (ArrayDecode, AudioDecode, AudioDecodeInit, - AudioFeatureSelector, BuildPseudoClip, DecordDecode, - DecordInit, DenseSampleFrames, +from .loading import (ArrayDecode, AudioFeatureSelector, BuildPseudoClip, + DecordDecode, DecordInit, DenseSampleFrames, GenerateLocalizationLabels, ImageDecode, LoadAudioFeature, LoadHVULabel, LoadLocalizationFeature, LoadProposals, LoadRGBFromFile, OpenCVDecode, OpenCVInit, @@ -15,29 +14,28 @@ MMDecode, MMUniformSampleFrames, PadTo, PoseCompact, PoseDecode, PreNormalize2D, PreNormalize3D, ToMotion, UniformSampleFrames) -from .processing import (AudioAmplify, CenterCrop, ColorJitter, Flip, Fuse, - MelSpectrogram, MultiScaleCrop, RandomCrop, - RandomRescale, RandomResizedCrop, Resize, TenCrop, - ThreeCrop) +from .processing import (CenterCrop, ColorJitter, Flip, Fuse, MultiScaleCrop, + RandomCrop, RandomRescale, RandomResizedCrop, Resize, + TenCrop, ThreeCrop) from .text_transforms import CLIPTokenize from .wrappers import ImgAug, PytorchVideoWrapper, TorchVisionWrapper __all__ = [ - 'ArrayDecode', 'AudioAmplify', 'AudioDecode', 'AudioDecodeInit', - 'AudioFeatureSelector', 'BuildPseudoClip', 'CenterCrop', 'ColorJitter', - 'DecordDecode', 'DecordInit', 'DecordInit', 'DenseSampleFrames', 'Flip', - 'FormatAudioShape', 'FormatGCNInput', 'FormatShape', 'Fuse', 'GenSkeFeat', - 'GenerateLocalizationLabels', 'GeneratePoseTarget', 'ImageDecode', - 'ImgAug', 'JointToBone', 'LoadAudioFeature', 'LoadHVULabel', - 'LoadKineticsPose', 'LoadLocalizationFeature', 'LoadProposals', - 'LoadRGBFromFile', 'MelSpectrogram', 'MergeSkeFeat', 'MultiScaleCrop', - 'OpenCVDecode', 'OpenCVInit', 'OpenCVInit', 'PIMSDecode', 'PIMSInit', - 'PackActionInputs', 'PackLocalizationInputs', 'PadTo', 'PoseCompact', - 'PoseDecode', 'PreNormalize2D', 'PreNormalize3D', 'PyAVDecode', - 'PyAVDecodeMotionVector', 'PyAVInit', 'PyAVInit', 'PytorchVideoWrapper', - 'RandomCrop', 'RandomRescale', 'RandomResizedCrop', 'RawFrameDecode', - 'Resize', 'SampleAVAFrames', 'SampleFrames', 'TenCrop', 'ThreeCrop', - 'ToMotion', 'TorchVisionWrapper', 'Transpose', 'UniformSample', - 'UniformSampleFrames', 'UntrimmedSampleFrames', 'MMUniformSampleFrames', - 'MMDecode', 'MMCompact', 'CLIPTokenize' + 'ArrayDecode', 'AudioFeatureSelector', 'BuildPseudoClip', 'CenterCrop', + 'ColorJitter', 'DecordDecode', 'DecordInit', 'DecordInit', + 'DenseSampleFrames', 'Flip', 'FormatAudioShape', 'FormatGCNInput', + 'FormatShape', 'Fuse', 'GenSkeFeat', 'GenerateLocalizationLabels', + 'GeneratePoseTarget', 'ImageDecode', 'ImgAug', 'JointToBone', + 'LoadAudioFeature', 'LoadHVULabel', 'LoadKineticsPose', + 'LoadLocalizationFeature', 'LoadProposals', 'LoadRGBFromFile', + 'MergeSkeFeat', 'MultiScaleCrop', 'OpenCVDecode', 'OpenCVInit', + 'OpenCVInit', 'PIMSDecode', 'PIMSInit', 'PackActionInputs', + 'PackLocalizationInputs', 'PadTo', 'PoseCompact', 'PoseDecode', + 'PreNormalize2D', 'PreNormalize3D', 'PyAVDecode', 'PyAVDecodeMotionVector', + 'PyAVInit', 'PyAVInit', 'PytorchVideoWrapper', 'RandomCrop', + 'RandomRescale', 'RandomResizedCrop', 'RawFrameDecode', 'Resize', + 'SampleAVAFrames', 'SampleFrames', 'TenCrop', 'ThreeCrop', 'ToMotion', + 'TorchVisionWrapper', 'Transpose', 'UniformSample', 'UniformSampleFrames', + 'UntrimmedSampleFrames', 'MMUniformSampleFrames', 'MMDecode', 'MMCompact', + 'CLIPTokenize' ] diff --git a/mmaction/datasets/transforms/formatting.py 
b/mmaction/datasets/transforms/formatting.py index 6ca61a4ccc..9b9cb375a9 100644 --- a/mmaction/datasets/transforms/formatting.py +++ b/mmaction/datasets/transforms/formatting.py @@ -361,8 +361,17 @@ def __repr__(self) -> str: class FormatAudioShape(BaseTransform): """Format final audio shape to the given input_format. - Required keys are ``audios``, ``num_clips`` and ``clip_len``, added or - modified keys are ``audios`` and ``input_shape``. + Required Keys: + + - audios + + Modified Keys: + + - audios + + Added Keys: + + - input_shape Args: input_format (str): Define the final imgs format. @@ -374,7 +383,7 @@ def __init__(self, input_format: str) -> None: raise ValueError( f'The input format {self.input_format} is invalid.') - def transform(self, results: dict) -> dict: + def transform(self, results: Dict) -> Dict: """Performs the FormatShape formatting. Args: @@ -389,7 +398,7 @@ def transform(self, results: dict) -> dict: results['input_shape'] = audios.shape return results - def __repr__(self): + def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f"(input_format='{self.input_format}')" return repr_str diff --git a/mmaction/datasets/transforms/loading.py b/mmaction/datasets/transforms/loading.py index e876143cd3..22070371a1 100644 --- a/mmaction/datasets/transforms/loading.py +++ b/mmaction/datasets/transforms/loading.py @@ -1621,105 +1621,39 @@ def transform(self, results): @TRANSFORMS.register_module() -class AudioDecodeInit(BaseTransform): - """Using librosa to initialize the audio reader. - - Required keys are ``audio_path``, added or modified keys are ``length``, - ``sample_rate``, ``audios``. - - Args: - io_backend (str): io backend where frames are store. - Defaults to ``disk``. - sample_rate (int): Audio sampling times per second. Defaults to 16000. - pad_method (str): Padding method. Defaults to ``zero``. - """ - - def __init__(self, - io_backend: str = 'disk', - sample_rate: int = 16000, - pad_method: str = 'zero', - **kwargs) -> None: - self.io_backend = io_backend - self.sample_rate = sample_rate - if pad_method in ['random', 'zero']: - self.pad_method = pad_method - else: - raise NotImplementedError - self.kwargs = kwargs - self.file_client = None - - @staticmethod - def _zero_pad(shape: int) -> np.ndarray: - """Zero padding method.""" - return np.zeros(shape, dtype=np.float32) - - @staticmethod - def _random_pad(shape: int) -> np.ndarray: - """Random padding method.""" - # librosa load raw audio file into a distribution of -1~+1 - return np.random.rand(shape).astype(np.float32) * 2 - 1 - - def transform(self, results: dict) -> dict: - """Perform the librosa initialization. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. - """ - try: - import librosa - except ImportError: - raise ImportError('Please install librosa first.') +class LoadAudioFeature(BaseTransform): + """Load offline extracted audio features. 
- if self.file_client is None: - self.file_client = FileClient(self.io_backend, **self.kwargs) - if osp.exists(results['audio_path']): - file_obj = io.BytesIO(self.file_client.get(results['audio_path'])) - y, sr = librosa.load(file_obj, sr=self.sample_rate) - else: - # Generate a random dummy 10s input - pad_func = getattr(self, f'_{self.pad_method}_pad') - y = pad_func(int(round(10.0 * self.sample_rate))) - sr = self.sample_rate + Required Keys: - results['length'] = y.shape[0] - results['sample_rate'] = sr - results['audios'] = y - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'io_backend={self.io_backend}, ' - f'sample_rate={self.sample_rate}, ' - f'pad_method={self.pad_method})') - return repr_str + - audio_path + Added Keys: -@TRANSFORMS.register_module() -class LoadAudioFeature(BaseTransform): - """Load offline extracted audio features. + - length + - audios - Required keys are "audio_path", added or modified keys are "length", - audios". + Args: + pad_method (str): Padding method. Defaults to ``'zero'``. """ - def __init__(self, pad_method='zero'): + def __init__(self, pad_method: str = 'zero') -> None: if pad_method not in ['zero', 'random']: raise NotImplementedError self.pad_method = pad_method @staticmethod - def _zero_pad(shape): + def _zero_pad(shape: int) -> np.ndarray: """Zero padding method.""" return np.zeros(shape, dtype=np.float32) @staticmethod - def _random_pad(shape): + def _random_pad(shape: int) -> np.ndarray: """Random padding method.""" # spectrogram is normalized into a distribution of 0~1 return np.random.rand(shape).astype(np.float32) - def transform(self, results): + def transform(self, results: Dict) -> Dict: """Perform the numpy loading. Args: @@ -1738,68 +1672,12 @@ def transform(self, results): results['audios'] = feature_map return results - def __repr__(self): + def __repr__(self) -> str: repr_str = (f'{self.__class__.__name__}(' f'pad_method={self.pad_method})') return repr_str -@TRANSFORMS.register_module() -class AudioDecode(BaseTransform): - """Sample the audio w.r.t. the frames selected. - - Args: - fixed_length (int): As the audio clip selected by frames sampled may - not be exactly the same, ``fixed_length`` will truncate or pad them - into the same size. Defaults to 32000. - - Required keys are ``frame_inds``, ``num_clips``, ``total_frames``, - ``length``, added or modified keys are ``audios``, ``audios_shape``. 
- """ - - def __init__(self, fixed_length: int = 32000) -> None: - self.fixed_length = fixed_length - - def transform(self, results: dict) -> dict: - """Perform the ``AudioDecode`` to pick audio clips.""" - audio = results['audios'] - frame_inds = results['frame_inds'] - num_clips = results['num_clips'] - resampled_clips = list() - frame_inds = frame_inds.reshape(num_clips, -1) - for clip_idx in range(num_clips): - clip_frame_inds = frame_inds[clip_idx] - start_idx = max( - 0, - int( - round((clip_frame_inds[0] + 1) / results['total_frames'] * - results['length']))) - end_idx = min( - results['length'], - int( - round((clip_frame_inds[-1] + 1) / results['total_frames'] * - results['length']))) - cropped_audio = audio[start_idx:end_idx] - if cropped_audio.shape[0] >= self.fixed_length: - truncated_audio = cropped_audio[:self.fixed_length] - else: - truncated_audio = np.pad( - cropped_audio, - ((0, self.fixed_length - cropped_audio.shape[0])), - mode='constant') - - resampled_clips.append(truncated_audio) - - results['audios'] = np.array(resampled_clips) - results['audios_shape'] = results['audios'].shape - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f"(fixed_length='{self.fixed_length}')" - return repr_str - - @TRANSFORMS.register_module() class BuildPseudoClip(BaseTransform): """Build pseudo clips with one single image by repeating it n times. @@ -1840,19 +1718,32 @@ def __repr__(self): class AudioFeatureSelector(BaseTransform): """Sample the audio feature w.r.t. the frames selected. - Required keys are "audios", "frame_inds", "num_clips", "length", - "total_frames", added or modified keys are "audios", "audios_shape". + Required Keys: + + - audios + - frame_inds + - num_clips + - length + - total_frames + + Modified Keys: + + - audios + + Added Keys: + + - audios_shape Args: fixed_length (int): As the features selected by frames sampled may not be exactly the same, `fixed_length` will truncate or pad them - into the same size. Default: 128. + into the same size. Defaults to 128. """ - def __init__(self, fixed_length=128): + def __init__(self, fixed_length: int = 128) -> None: self.fixed_length = fixed_length - def transform(self, results): + def transform(self, results: Dict) -> Dict: """Perform the ``AudioFeatureSelector`` to pick audio feature clips. Args: @@ -1891,7 +1782,7 @@ def transform(self, results): results['audios_shape'] = results['audios'].shape return results - def __repr__(self): + def __repr__(self) -> str: repr_str = (f'{self.__class__.__name__}(' f'fix_length={self.fixed_length})') return repr_str diff --git a/mmaction/datasets/transforms/processing.py b/mmaction/datasets/transforms/processing.py index 13637dcf38..3d432bd723 100644 --- a/mmaction/datasets/transforms/processing.py +++ b/mmaction/datasets/transforms/processing.py @@ -1276,117 +1276,6 @@ def __repr__(self): return repr_str -@TRANSFORMS.register_module() -class AudioAmplify(BaseTransform): - """Amplify the waveform. - - Required keys are ``audios``, added or modified keys are ``audios``, - ``amplify_ratio``. - - Args: - ratio (float): The ratio used to amplify the audio waveform. - """ - - def __init__(self, ratio: float) -> None: - if isinstance(ratio, float): - self.ratio = ratio - else: - raise TypeError('Amplification ratio should be float.') - - def transform(self, results: dict) -> dict: - """Perform the audio amplification. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. 
- """ - - assert 'audios' in results - results['audios'] *= self.ratio - results['amplify_ratio'] = self.ratio - - return results - - def __repr__(self): - repr_str = f'{self.__class__.__name__}(ratio={self.ratio})' - return repr_str - - -@TRANSFORMS.register_module() -class MelSpectrogram(BaseTransform): - """MelSpectrogram. Transfer an audio wave into a melspectogram figure. - - Required keys are ``audios``, ``sample_rate``, ``num_clips``, added or - modified keys are ``audios``. - - Args: - window_size (int): The window size in millisecond. Defaults to 32. - step_size (int): The step size in millisecond. Defaults to 16. - n_mels (int): Number of mels. Defaults to 80. - fixed_length (int): The sample length of melspectrogram maybe not - exactly as wished due to different fps, fix the length for batch - collation by truncating or padding. Defaults to 128. - """ - - def __init__(self, - window_size: int = 32, - step_size: int = 16, - n_mels: int = 80, - fixed_length: int = 128) -> None: - if all( - isinstance(x, int) - for x in [window_size, step_size, n_mels, fixed_length]): - self.window_size = window_size - self.step_size = step_size - self.n_mels = n_mels - self.fixed_length = fixed_length - else: - raise TypeError('All arguments should be int.') - - def transform(self, results: dict) -> dict: - """Perform MelSpectrogram transformation. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. - """ - try: - import librosa - except ImportError: - raise ImportError('Install librosa first.') - signals = results['audios'] - sample_rate = results['sample_rate'] - n_fft = int(round(sample_rate * self.window_size / 1000)) - hop_length = int(round(sample_rate * self.step_size / 1000)) - melspectrograms = list() - for clip_idx in range(results['num_clips']): - clip_signal = signals[clip_idx] - mel = librosa.feature.melspectrogram( - y=clip_signal, - sr=sample_rate, - n_fft=n_fft, - hop_length=hop_length, - n_mels=self.n_mels) - if mel.shape[0] >= self.fixed_length: - mel = mel[:self.fixed_length, :] - else: - mel = np.pad( - mel, ((0, self.fixed_length - mel.shape[0]), (0, 0)), - mode='edge') - melspectrograms.append(mel) - - results['audios'] = np.array(melspectrograms) - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}' - f'(window_size={self.window_size}), ' - f'step_size={self.step_size}, ' - f'n_mels={self.n_mels}, ' - f'fixed_length={self.fixed_length})') - return repr_str - - @TRANSFORMS.register_module() class RandomErasing(BaseTransform): """Randomly selects a rectangle region in an image and erase pixels. 
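The hunk above drops the online `AudioAmplify` and `MelSpectrogram` transforms, so mel-spectrogram features are now expected to be computed offline and stored as `.npy` files that `LoadAudioFeature` can read. As a rough sketch of that offline step (the function name and file paths below are placeholders, and the parameter values simply mirror the removed `MelSpectrogram` defaults; the maintained reference is `tools/data/build_audio_features.py`, whose normalization and array layout should be treated as authoritative):

```python
# Illustrative sketch only: parameters mirror the removed MelSpectrogram
# transform (32 ms windows, 16 ms hops, 80 mel bins); the maintained reference
# for feature extraction is tools/data/build_audio_features.py.
import numpy as np
import librosa


def extract_mel_feature(wav_path: str, out_path: str,
                        sample_rate: int = 16000,
                        window_ms: int = 32,
                        step_ms: int = 16,
                        n_mels: int = 80) -> None:
    """Compute a mel spectrogram offline and save it as a .npy feature file."""
    y, sr = librosa.load(wav_path, sr=sample_rate)  # mono waveform
    n_fft = int(round(sr * window_ms / 1000))       # window length in samples
    hop_length = int(round(sr * step_ms / 1000))    # hop length in samples
    mel = librosa.feature.melspectrogram(
        y=y, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels)
    np.save(out_path, mel.astype(np.float32))       # later read by LoadAudioFeature


extract_mel_feature('some/clip.wav', 'some/clip.npy')  # hypothetical paths
```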
diff --git a/tests/datasets/transforms/test_loading.py b/tests/datasets/transforms/test_loading.py index 035a2213cc..ee2cc64717 100644 --- a/tests/datasets/transforms/test_loading.py +++ b/tests/datasets/transforms/test_loading.py @@ -10,8 +10,7 @@ from mmengine.testing import assert_dict_has_keys from numpy.testing import assert_array_almost_equal -from mmaction.datasets.transforms import (AudioDecode, AudioDecodeInit, - DecordDecode, DecordInit, +from mmaction.datasets.transforms import (DecordDecode, DecordInit, GenerateLocalizationLabels, LoadAudioFeature, LoadHVULabel, LoadLocalizationFeature, @@ -533,42 +532,6 @@ def test_rawframe_decode(self): f'{frame_selector.__class__.__name__}(io_backend=disk, ' f'decoding_backend=turbojpeg)') - def test_audio_decode_init(self): - try: - import soundfile as sf # noqa: F401 - except (OSError, ImportError): - return - target_keys = ['audios', 'length', 'sample_rate'] - inputs = copy.deepcopy(self.audio_results) - audio_decode_init = AudioDecodeInit() - results = audio_decode_init(inputs) - assert assert_dict_has_keys(results, target_keys) - - # test when no audio file exists - inputs = copy.deepcopy(self.audio_results) - inputs['audio_path'] = 'foo/foo/bar.wav' - audio_decode_init = AudioDecodeInit() - results = audio_decode_init(inputs) - assert assert_dict_has_keys(results, target_keys) - assert results['audios'].shape == (10.0 * - audio_decode_init.sample_rate, ) - assert repr(audio_decode_init) == ( - f'{audio_decode_init.__class__.__name__}(' - f'io_backend=disk, ' - f'sample_rate=16000, ' - f'pad_method=zero)') - - def test_audio_decode(self): - target_keys = ['frame_inds', 'audios'] - inputs = copy.deepcopy(self.audio_results) - inputs['frame_inds'] = np.arange(0, self.audio_total_frames, - 2)[:, np.newaxis] - inputs['num_clips'] = 1 - inputs['length'] = 1280 - audio_selector = AudioDecode() - results = audio_selector(inputs) - assert assert_dict_has_keys(results, target_keys) - def test_pyav_decode_motion_vector(self): pyav_init = PyAVInit() pyav = PyAVDecodeMotionVector() diff --git a/tests/datasets/transforms/test_processing.py b/tests/datasets/transforms/test_processing.py index 028f5d7129..cc7c18add2 100644 --- a/tests/datasets/transforms/test_processing.py +++ b/tests/datasets/transforms/test_processing.py @@ -7,11 +7,10 @@ from mmengine.testing import assert_dict_has_keys from numpy.testing import assert_array_almost_equal -from mmaction.datasets.transforms import (AudioAmplify, CenterCrop, - ColorJitter, Flip, Fuse, - MelSpectrogram, MultiScaleCrop, - RandomCrop, RandomResizedCrop, - Resize, TenCrop, ThreeCrop) +from mmaction.datasets.transforms import (CenterCrop, ColorJitter, Flip, Fuse, + MultiScaleCrop, RandomCrop, + RandomResizedCrop, Resize, TenCrop, + ThreeCrop) def check_crop(origin_imgs, result_imgs, result_bbox, num_crops=1): @@ -70,59 +69,6 @@ def check_flip(origin_imgs, result_imgs, flip_type): return True -class TestAudio: - - @staticmethod - def test_audio_amplify(): - target_keys = ['audios', 'amplify_ratio'] - with pytest.raises(TypeError): - # ratio should be float - AudioAmplify(1) - - audio = (np.random.rand(8, )) - results = dict(audios=audio) - amplifier = AudioAmplify(1.5) - results = amplifier(results) - assert assert_dict_has_keys(results, target_keys) - assert repr(amplifier) == (f'{amplifier.__class__.__name__}' - f'(ratio={amplifier.ratio})') - - @staticmethod - def test_melspectrogram(): - target_keys = ['audios'] - with pytest.raises(TypeError): - # ratio should be float - 
MelSpectrogram(window_size=12.5) - audio = (np.random.rand(1, 160000)) - - # test padding - results = dict(audios=audio, sample_rate=16000) - results['num_clips'] = 1 - results['sample_rate'] = 16000 - mel = MelSpectrogram() - try: - import soundfile as sf # noqa: F401 - except (OSError, ImportError): - return - - results = mel(results) - assert assert_dict_has_keys(results, target_keys) - - # test truncating - audio = (np.random.rand(1, 160000)) - results = dict(audios=audio, sample_rate=16000) - results['num_clips'] = 1 - results['sample_rate'] = 16000 - mel = MelSpectrogram(fixed_length=1) - results = mel(results) - assert assert_dict_has_keys(results, target_keys) - assert repr(mel) == (f'{mel.__class__.__name__}' - f'(window_size={mel.window_size}), ' - f'step_size={mel.step_size}, ' - f'n_mels={mel.n_mels}, ' - f'fixed_length={mel.fixed_length})') - - class TestColor: @staticmethod diff --git a/tools/data/build_audio_features.py b/tools/data/build_audio_features.py index 28356a0e64..cd3070bace 100644 --- a/tools/data/build_audio_features.py +++ b/tools/data/build_audio_features.py @@ -38,11 +38,16 @@ class AudioTools: `_. Args: - frame_rate (int): The frame rate per second of the video. Default: 30. - sample_rate (int): The sample rate for audio sampling. Default: 16000. - num_mels (int): Number of channels of the melspectrogram. Default: 80. - fft_size (int): fft_size / sample_rate is window size. Default: 1280. - hop_size (int): hop_size / sample_rate is step size. Default: 320. + frame_rate (int): The frame rate per second of the video. + Defaults to 30. + sample_rate (int): The sample rate for audio sampling. + Defaults to 16000. + num_mels (int): Number of channels of the melspectrogram. + Defaults to 80. + fft_size (int): fft_size / sample_rate is window size. + Defaults to 1280. + hop_size (int): hop_size / sample_rate is step size. + Defaults to 320. """ def __init__(self, @@ -290,15 +295,15 @@ def extract_audio_feature(wav_path, audio_tools, mel_out_dir): parser.add_argument('audio_home_path', type=str) parser.add_argument('spectrogram_save_path', type=str) parser.add_argument('--level', type=int, default=1) - parser.add_argument('--ext', default='.m4a') + parser.add_argument('--ext', default='m4a') parser.add_argument('--num-workers', type=int, default=4) parser.add_argument('--part', type=str, default='1/1') args = parser.parse_args() mmengine.mkdir_or_exist(args.spectrogram_save_path) - files = glob.glob( - osp.join(args.audio_home_path, '*/' * args.level, '*' + args.ext)) + files = glob.glob(args.audio_home_path + '/*' * args.level + '.' 
+ + args.ext) print(f'found {len(files)} files.') files = sorted(files) if args.part is not None: From 3066b8735bcfcbab0079be37664f0c99508c58b1 Mon Sep 17 00:00:00 2001 From: LinXiaoZheng <90811472+Zheng-LinXiao@users.noreply.github.com> Date: Tue, 8 Aug 2023 10:48:42 +0800 Subject: [PATCH 03/24] [Doc] modify_readme (#2629) --- README_zh-CN.md | 2 +- projects/actionclip/README.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/README_zh-CN.md b/README_zh-CN.md index 7cdea2c165..52b5096c2a 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -26,7 +26,7 @@ [![Percentage of issues still open](https://isitmaintained.com/badge/open/open-mmlab/mmaction2.svg)](https://github.com/open-mmlab/mmaction2/issues) [๐Ÿ“˜ไธญๆ–‡ๆ–‡ๆกฃ](https://mmaction2.readthedocs.io/zh_CN/latest/index.html) | -[๐Ÿ› ๏ธๅฎ‰่ฃ…ๆŒ‡ๅ—](https://mmaction2.readthedocs.io/zh_CN/get_started/installation.html) | +[๐Ÿ› ๏ธๅฎ‰่ฃ…ๆŒ‡ๅ—](https://mmaction2.readthedocs.io/zh_CN/latest/get_started/installation.html) | [๐Ÿ‘€ๆจกๅž‹ๅบ“](https://mmaction2.readthedocs.io/zh_CN/latest/modelzoo_statistics.html) | [๐Ÿ†•ๆ›ดๆ–ฐๆ—ฅๅฟ—](https://mmaction2.readthedocs.io/en/latest/notes/changelog.html) | [๐Ÿš€่ฟ›่กŒไธญ้กน็›ฎ](https://github.com/open-mmlab/mmaction2/projects) | diff --git a/projects/actionclip/README.md b/projects/actionclip/README.md index a16b44e249..cfaf0e3f2b 100644 --- a/projects/actionclip/README.md +++ b/projects/actionclip/README.md @@ -120,6 +120,7 @@ print("Label probs:", probs) # [[9.995e-01 5.364e-07 6.666e-04]] ```python import mmengine +import torch from mmaction.utils import register_all_modules from mmaction.apis import inference_recognizer, init_recognizer From 4c1b19bdb7b36ec634defaff1b4468e71d103721 Mon Sep 17 00:00:00 2001 From: wxDai Date: Thu, 10 Aug 2023 15:32:12 +0800 Subject: [PATCH 04/24] [Feature] Add audio demo (#2603) --- demo/README.md | 29 +++++++++++++++++++ demo/demo_audio.py | 57 ++++++++++++++++++++++++++++++++++++++ mmaction/apis/inference.py | 14 ++++++++-- 3 files changed, 98 insertions(+), 2 deletions(-) create mode 100644 demo/demo_audio.py diff --git a/demo/README.md b/demo/README.md index f82ee2f8c8..6d97f18fd1 100644 --- a/demo/README.md +++ b/demo/README.md @@ -10,6 +10,7 @@ - [SpatioTemporal Action Detection Video Demo](#spatiotemporal-action-detection-video-demo): A demo script to predict the spatiotemporal action detection result using a single video. - [SpatioTemporal Action Detection ONNX Video Demo](#spatiotemporal-action-detection-onnx-video-demo): A demo script to predict the SpatioTemporal Action Detection result using the onnx file instead of building the PyTorch models. - [Inferencer Demo](#inferencer): A demo script to implement fast predict for video analysis tasks based on unified inferencer interface. +- [Audio Demo](#audio-demo): A demo script to predict the recognition result using a single audio file. ## Modify configs through script arguments @@ -438,3 +439,31 @@ Assume that you are located at `$MMACTION2`. --rec tsn \ --label-file tools/data/kinetics/label_map_k400.txt ``` + +## Audio Demo + +Demo script to predict the audio-based action recognition using a single audio feature. + +The script [`extract_audio.py`](/tools/data/extract_audio.py) can be used to extract audios from videos and the script [`build_audio_features.py`](/tools/data/build_audio_features.py) can be used to extract the audio features. 
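For illustration only (the directory names below are placeholders, not paths from the original docs), a feature-extraction run with the script above might look like this, using the positional arguments and flags defined in `tools/data/build_audio_features.py` (`audio_home_path`, `spectrogram_save_path`, `--level`, `--ext`, `--num-workers`, `--part`):

```shell
# Hedged example: directory names are placeholders; the flags follow the
# argparse definition in tools/data/build_audio_features.py.
python tools/data/build_audio_features.py \
    data/kinetics400/audios_train \
    data/kinetics400/audio_features_train \
    --ext m4a --num-workers 8
```

The resulting `.npy` feature files can then be passed to `demo_audio.py`, as shown below.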
+ +```shell +python demo/demo_audio.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${AUDIO_FILE} {LABEL_FILE} [--device ${DEVICE}] +``` + +Optional arguments: + +- `DEVICE`: Type of device to run the demo. Allowed values are cuda devices like `cuda:0` or `cpu`. If not specified, it will be set to `cuda:0`. + +Examples: + +Assume that you are located at `$MMACTION2` and have already downloaded the checkpoints to the directory `checkpoints/`, +or use checkpoint url from `configs/` to directly load the corresponding checkpoint, which will be automatically saved in `$HOME/.cache/torch/checkpoints`. + +1. Recognize an audio file as input by using a tsn model on cuda by default. + + ```shell + python demo/demo_audio.py \ + configs/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature.py \ + https://download.openmmlab.com/mmaction/v1.0/recognition_audio/resnet/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature/tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature_20230702-e4642fb0.pth \ + audio_feature.npy tools/data/kinetics/label_map_k400.txt + ``` diff --git a/demo/demo_audio.py b/demo/demo_audio.py new file mode 100644 index 0000000000..2da446a2da --- /dev/null +++ b/demo/demo_audio.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from operator import itemgetter + +import torch +from mmengine import Config, DictAction + +from mmaction.apis import inference_recognizer, init_recognizer + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 demo') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file/url') + parser.add_argument('audio', help='audio file') + parser.add_argument('label', help='label file') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + parser.add_argument( + '--device', type=str, default='cuda:0', help='CPU/CUDA device option') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + device = torch.device(args.device) + cfg = Config.fromfile(args.config) + cfg.merge_from_dict(args.cfg_options) + model = init_recognizer(cfg, args.checkpoint, device=device) + + if not args.audio.endswith('.npy'): + raise NotImplementedError('Demo works on extracted audio features') + pred_result = inference_recognizer(model, args.audio) + + pred_scores = pred_result.pred_scores.item.tolist() + score_tuples = tuple(zip(range(len(pred_scores)), pred_scores)) + score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) + top5_label = score_sorted[:5] + + labels = open(args.label).readlines() + labels = [x.strip() for x in labels] + results = [(labels[k[0]], k[1]) for k in top5_label] + + print('The top-5 labels with corresponding scores are:') + for result in results: + print(f'{result[0]}: ', result[1]) + + +if __name__ == '__main__': + main() diff --git a/mmaction/apis/inference.py b/mmaction/apis/inference.py index 3561101e00..f382ec1f11 100644 --- a/mmaction/apis/inference.py +++ b/mmaction/apis/inference.py @@ -1,4 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp from pathlib import Path from typing import List, Optional, Union @@ -80,8 +81,11 @@ def inference_recognizer(model: nn.Module, input_flag = None if isinstance(video, dict): input_flag = 'dict' - elif isinstance(video, str): - input_flag = 'video' + elif isinstance(video, str) and osp.exists(video): + if video.endswith('.npy'): + input_flag = 'audio' + else: + input_flag = 'video' else: raise RuntimeError(f'The type of argument `video` is not supported: ' f'{type(video)}') @@ -90,6 +94,12 @@ def inference_recognizer(model: nn.Module, data = video if input_flag == 'video': data = dict(filename=video, label=-1, start_index=0, modality='RGB') + if input_flag == 'audio': + data = dict( + audio_path=video, + total_frames=len(np.load(video)), + start_index=0, + label=-1) data = test_pipeline(data) data = pseudo_collate([data]) From f539772f34a985b3ba980c237265d067e5561120 Mon Sep 17 00:00:00 2001 From: wxDai Date: Thu, 10 Aug 2023 17:21:22 +0800 Subject: [PATCH 05/24] [Fix] Support MPS device (#2619) --- mmaction/models/heads/tpn_head.py | 4 ++-- mmaction/models/losses/hvu_loss.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/mmaction/models/heads/tpn_head.py b/mmaction/models/heads/tpn_head.py index 6f32f65109..fb2fa4e907 100644 --- a/mmaction/models/heads/tpn_head.py +++ b/mmaction/models/heads/tpn_head.py @@ -2,6 +2,7 @@ from typing import Optional import torch.nn as nn +from mmengine.device import get_device from torch import Tensor from mmaction.registry import MODELS @@ -26,8 +27,7 @@ def __init__(self, *args, **kwargs) -> None: def _init_new_cls(self) -> None: self.new_cls = nn.Conv3d(self.in_channels, self.num_classes, 1, 1, 0) - if next(self.fc_cls.parameters()).is_cuda: - self.new_cls = self.new_cls.cuda() + self.new_cls = self.new_cls.to(get_device()) self.new_cls.weight.copy_(self.fc_cls.weight[..., None, None, None]) self.new_cls.bias.copy_(self.fc_cls.bias) diff --git a/mmaction/models/losses/hvu_loss.py b/mmaction/models/losses/hvu_loss.py index 38be482ab2..d3f7aaa274 100644 --- a/mmaction/models/losses/hvu_loss.py +++ b/mmaction/models/losses/hvu_loss.py @@ -1,6 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import torch import torch.nn.functional as F +from mmengine.device import get_device from mmaction.registry import MODELS from .base import BaseWeightedLoss @@ -111,7 +112,8 @@ def _forward(self, cls_score, label, mask, category_mask): # there should be at least one sample which contains tags # in this category if torch.sum(category_mask_i) < 0.5: - losses[f'{name}_LOSS'] = torch.tensor(.0).cuda() + losses[f'{name}_LOSS'] = torch.tensor( + .0, device=get_device()) loss_weights[f'{name}_LOSS'] = .0 continue category_loss = torch.sum(category_loss * category_mask_i) From d1645c0a731b1ed7e67c6e135d84d4855d6b6d6f Mon Sep 17 00:00:00 2001 From: SCZwangxiao <31362395+SCZwangxiao@users.noreply.github.com> Date: Mon, 14 Aug 2023 15:02:14 +0800 Subject: [PATCH 06/24] [Doc] Translate the doc of Uniformer V1&V2 into Chinese (#2592) --- configs/recognition/uniformer/README.md | 2 +- configs/recognition/uniformer/README_zh-CN.md | 55 +++++++++++ .../recognition/uniformerv2/README_zh-CN.md | 98 +++++++++++++++++++ 3 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 configs/recognition/uniformer/README_zh-CN.md create mode 100644 configs/recognition/uniformerv2/README_zh-CN.md diff --git a/configs/recognition/uniformer/README.md b/configs/recognition/uniformer/README.md index 6d04b7920e..4cbbba0e8a 100644 --- a/configs/recognition/uniformer/README.md +++ b/configs/recognition/uniformer/README.md @@ -32,7 +32,7 @@ The models are ported from the repo [UniFormer](https://github.com/Sense-X/UniFo 2. The values in `top1/5 acc` is tested on the same data list as the original repo, and the label map is provided by [UniFormer](https://drive.google.com/drive/folders/17VB-XdF3Kfr9ORmnGyXCxTMs86n0L4QL). The total videos are available at [Kinetics400](https://pan.baidu.com/s/1t5K0FRz3PGAT-37-3FwAfg) (BaiduYun password: g5kp), which consists of 19787 videos. 3. The values in columns named after "mm-Kinetics" are the testing results on the Kinetics dataset held by MMAction2, which is also used by other models in MMAction2. Due to the differences between various versions of Kinetics dataset, there is a little gap between `top1/5 acc` and `mm-Kinetics top1/5 acc`. For a fair comparison with other models, we report both results here. Note that we simply report the inference results, since the training set is different between UniFormer and other models, the results are lower than that tested on the author's version. 4. Since the original models for Kinetics-400/600/700 adopt different [label file](https://drive.google.com/drive/folders/17VB-XdF3Kfr9ORmnGyXCxTMs86n0L4QL), we simply map the weight according to the label name. New label map for Kinetics-400/600/700 can be found [here](https://github.com/open-mmlab/mmaction2/tree/main/tools/data/kinetics). -5. Due to some difference between [SlowFast](https://github.com/facebookresearch/SlowFast) and MMAction, there are some gaps between their performances. +5. Due to some difference between [SlowFast](https://github.com/facebookresearch/SlowFast) and MMAction2, there are some gaps between their performances. For more details on data preparation, you can refer to [preparing_kinetics](/tools/data/kinetics/README.md). 
diff --git a/configs/recognition/uniformer/README_zh-CN.md b/configs/recognition/uniformer/README_zh-CN.md new file mode 100644 index 0000000000..b5bda7f039 --- /dev/null +++ b/configs/recognition/uniformer/README_zh-CN.md @@ -0,0 +1,55 @@ +# UniFormer + +[UniFormer: Unified Transformer for Efficient Spatiotemporal Representation Learning](https://arxiv.org/abs/2201.04676) + + + +## ็ฎ€ไป‹ + +```BibTeX +@inproceedings{ + li2022uniformer, + title={UniFormer: Unified Transformer for Efficient Spatial-Temporal Representation Learning}, + author={Kunchang Li and Yali Wang and Gao Peng and Guanglu Song and Yu Liu and Hongsheng Li and Yu Qiao}, + booktitle={International Conference on Learning Representations}, + year={2022}, + url={https://openreview.net/forum?id=nBU_u6DLvoK} +} +``` + +## ๆจกๅž‹ๅบ“ + +### Kinetics-400 + +| ๅธง้‡‡ๆ ท็ญ–็•ฅ | ๅˆ†่พจ็Ž‡ | ไธปๅนฒ็ฝ‘็ปœ | top1 ๅ‡†็กฎ็Ž‡ | top5 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/Sense-X/UniFormer/blob/main/video_classification/README.md) top1 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/Sense-X/UniFormer/blob/main/video_classification/README.md) top5 ๅ‡†็กฎ็Ž‡ | mm-Kinetics top1 ๅ‡†็กฎ็Ž‡ | mm-Kinetics top5 ๅ‡†็กฎ็Ž‡ | ๆต‹่ฏ•ๆ–นๆกˆ | FLOPs | ๅ‚ๆ•ฐ้‡ | ้…็ฝฎๆ–‡ไปถ | ckpt | +| :--------: | :------------: | :---------: | :---------: | :---------: | :---------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------: | :---------------------: | :---------------------: | :--------------: | :---: | :----: | :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------: | +| 16x4x1 | short-side 320 | UniFormer-S | 80.9 | 94.6 | 80.8 | 94.7 | 80.9 | 94.6 | 4 clips x 1 crop | 41.8G | 21.4M | [config](/configs/recognition/uniformer/uniformer-small_imagenet1k-pre_16x4x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv1/uniformer-small_imagenet1k-pre_16x4x1_kinetics400-rgb_20221219-c630a037.pth) | +| 16x4x1 | short-side 320 | UniFormer-B | 82.0 | 95.0 | 82.0 | 95.1 | 82.0 | 95.0 | 4 clips x 1 crop | 96.7G | 49.8M | [config](/configs/recognition/uniformer/uniformer-base_imagenet1k-pre_16x4x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv1/uniformer-base_imagenet1k-pre_16x4x1_kinetics400-rgb_20221219-157c2e66.pth) | +| 32x4x1 | short-side 320 | UniFormer-B | 83.1 | 95.3 | 82.9 | 95.4 | 83.0 | 95.3 | 4 clips x 1 crop | 59G | 49.8M | [config](/configs/recognition/uniformer/uniformer-base_imagenet1k-pre_32x4x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv1/uniformer-base_imagenet1k-pre_32x4x1_kinetics400-rgb_20221219-b776322c.pth) | + +่ฟ™ไบ›ๆจกๅž‹่ฟ็งป่‡ช [UniFormer](https://github.com/Sense-X/UniFormer/blob/main/video_classification/README.md)ไป“ๅบ“๏ผŒๅนถๅœจๆˆ‘ไปฌ็š„ๆ•ฐๆฎไธŠ่ฟ›่กŒไบ†ๆต‹่ฏ•ใ€‚็›ฎๅ‰๏ผŒๆˆ‘ไปฌไป…ๆ”ฏๆŒๅฏน UniFormer ๆจกๅž‹็š„ๆต‹่ฏ•๏ผŒ่ฎญ็ปƒๅŠŸ่ƒฝๅฐ†ๅพˆๅฟซๆไพ›ใ€‚ + +1. ๅ็งฐไธบ"ๅ‚่€ƒๆ–‡็Œฎ"็š„ๅˆ—ไธญ็š„ๅ€ผๆ˜ฏๅŽŸๅง‹ไป“ๅบ“็š„็ป“ๆžœใ€‚ +2. 
`top1/5 ๅ‡†็กฎ็Ž‡`ไธญ็š„ๅ€ผๆ˜ฏๆจกๅž‹ๅœจไธŽๅŽŸๅง‹ไป“ๅบ“็›ธๅŒ็š„ๆ•ฐๆฎ้›†ไธŠ็š„ๆต‹่ฏ•็ป“ๆžœ๏ผŒๅˆ†็ฑปๅ™จ็ป“ๆžœ-ๆ ‡็ญพๆ˜ ๅฐ„ไธŽ[UniFormer](https://drive.google.com/drive/folders/17VB-XdF3Kfr9ORmnGyXCxTMs86n0L4QL)ไธ€่‡ดใ€‚ๆ•ฐๆฎ้›†ๆ€ปๅ…ฑๆœ‰19787ไธช่ง†้ข‘๏ผŒๅฏไปฅๅœจ[Kinetics400](https://pan.baidu.com/s/1t5K0FRz3PGAT-37-3FwAfg)๏ผˆ็™พๅบฆไบ‘ๅฏ†็ ๏ผšg5kp๏ผ‰ไธญ่Žทๅ–ใ€‚
+3. ๅ็งฐไธบ "mm-Kinetics" ็š„ๅˆ—ไธญ็š„ๅ€ผๆ˜ฏๆจกๅž‹ๅœจ MMAction2 ๆŒๆœ‰็š„ Kinetics ๆ•ฐๆฎ้›†ไธŠ็š„ๆต‹่ฏ•็ป“ๆžœ๏ผŒๅ…ถไป– MMAction2 ๆจกๅž‹ไนŸไฝฟ็”จไบ†่ฏฅๆ•ฐๆฎ้›†ใ€‚็”ฑไบŽ Kinetics ๆ•ฐๆฎ้›†็š„ๅ„ไธช็‰ˆๆœฌไน‹้—ดๅญ˜ๅœจๅทฎๅผ‚๏ผŒๅ› ๆญค `top1/5 ๅ‡†็กฎ็Ž‡` ๅ’Œ `mm-Kinetics top1/5 ๅ‡†็กฎ็Ž‡` ไน‹้—ดๅญ˜ๅœจไธ€ไบ›ๅทฎ่ทใ€‚ไธบไบ†ไธŽๅ…ถไป–ๆจกๅž‹่ฟ›่กŒๅ…ฌๅนณๆฏ”่พƒ๏ผŒๆˆ‘ไปฌๅœจ่ฟ™้‡ŒๆŠฅๅ‘Šไบ†ไธคไธช็ป“ๆžœใ€‚่ฏทๆณจๆ„๏ผŒๆˆ‘ไปฌๅชๆŠฅๅ‘Šไบ†ๆŽจ็†็ป“ๆžœ๏ผŒ็”ฑไบŽ UniFormer ๅ’Œๅ…ถไป–ๆจกๅž‹ไน‹้—ด็š„่ฎญ็ปƒ้›†ไธๅŒ๏ผŒ่ฏฅ็ป“ๆžœไฝŽไบŽๅœจไฝœ่€…็‰ˆๆœฌไธŠๆต‹่ฏ•็š„็ป“ๆžœใ€‚
+4. ็”ฑไบŽ Kinetics-400/600/700 ็š„ๅŽŸๅง‹ๆจกๅž‹้‡‡็”จไบ†ไธๅŒ็š„[ๆ ‡็ญพๆ–‡ไปถ](https://drive.google.com/drive/folders/17VB-XdF3Kfr9ORmnGyXCxTMs86n0L4QL)๏ผŒๆˆ‘ไปฌๆ นๆฎๆ ‡็ญพๅ็งฐ็ฎ€ๅ•ๅœฐๆ˜ ๅฐ„ไบ†ๆƒ้‡ใ€‚Kinetics-400/600/700 ็š„ๆ–ฐๆ ‡็ญพๆ˜ ๅฐ„ๅฏไปฅๅœจ[่ฟ™้‡Œ](https://github.com/open-mmlab/mmaction2/tree/main/tools/data/kinetics)ๆ‰พๅˆฐใ€‚
+5. ็”ฑไบŽ [SlowFast](https://github.com/facebookresearch/SlowFast) ๅ’Œ MMAction2 ไน‹้—ดๅญ˜ๅœจไธ€ไบ›ๅทฎๅผ‚๏ผŒๅฎƒไปฌ็š„ๆ€ง่ƒฝๅญ˜ๅœจไธ€ไบ›ๅทฎ่ทใ€‚
+
+ๆœ‰ๅ…ณๆ•ฐๆฎๅ‡†ๅค‡็š„ๆ›ดๅคš่ฏฆ็ป†ไฟกๆฏ๏ผŒๆ‚จๅฏไปฅๅ‚่€ƒ[ๅ‡†ๅค‡_kinetics](/tools/data/kinetics/README_zh-CN.md)ใ€‚
+
+## ๅฆ‚ไฝ•ๆต‹่ฏ•
+
+ๆ‚จๅฏไปฅไฝฟ็”จไปฅไธ‹ๅ‘ฝไปคๆฅๆต‹่ฏ•ๆจกๅž‹๏ผš
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+็คบไพ‹๏ผšๅœจ Kinetics-400 ๆ•ฐๆฎ้›†ไธŠๆต‹่ฏ• UniFormer-S ๆจกๅž‹๏ผŒๅนถๅฐ†็ป“ๆžœ่ฝฌๅ‚จๅˆฐไธ€ไธช pkl ๆ–‡ไปถไธญใ€‚
+
+```shell
+python tools/test.py configs/recognition/uniformer/uniformer-small_imagenet1k-pre_16x4x1_kinetics400-rgb.py \
+    checkpoints/SOME_CHECKPOINT.pth --dump result.pkl
+```
+
+ๆœ‰ๅ…ณๆ›ดๅคš่ฏฆ็ป†ไฟกๆฏ๏ผŒ่ฏทๅ‚่€ƒ[่ฎญ็ปƒๅ’Œๆต‹่ฏ•ๆ•™็จ‹](/docs/zh_cn/user_guides/train_test.md)ไธญ็š„**ๆต‹่ฏ•**้ƒจๅˆ†ใ€‚
diff --git a/configs/recognition/uniformerv2/README_zh-CN.md b/configs/recognition/uniformerv2/README_zh-CN.md
new file mode 100644
index 0000000000..a8e43760b0
--- /dev/null
+++ b/configs/recognition/uniformerv2/README_zh-CN.md
@@ -0,0 +1,98 @@
+# UniFormerV2
+
+[UniFormerV2: Spatiotemporal Learning by Arming Image ViTs with Video UniFormer](https://arxiv.org/abs/2211.09552)
+
+
+
+## ็ฎ€ไป‹
+
+```BibTeX
+@article{Li2022UniFormerV2SL,
+  title={UniFormerV2: Spatiotemporal Learning by Arming Image ViTs with Video UniFormer},
+  author={Kunchang Li and Yali Wang and Yinan He and Yizhuo Li and Yi Wang and Limin Wang and Y.
Qiao}, + journal={ArXiv}, + year={2022}, + volume={abs/2211.09552} +} +``` + +## ๆจกๅž‹ๅบ“ + +### Kinetics-400 + +| ๅ‡ๅŒ€้‡‡ๆ ทๅธงๆ•ฐ | ๅˆ†่พจ็Ž‡ | ไธปๅนฒ็ฝ‘็ปœ | ไธŽ่ฎญ็ปƒ | top1 ๅ‡†็กฎ็Ž‡ | top5 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/OpenGVLab/UniFormerV2/blob/main/MODEL_ZOO.md) top1 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/OpenGVLab/UniFormerV2/blob/main/MODEL_ZOO.md) top5 ๅ‡†็กฎ็Ž‡ | mm-Kinetics top1 ๅ‡†็กฎ็Ž‡ | mm-Kinetics top5 ๅ‡†็กฎ็Ž‡ | ๆต‹่ฏ•ๆ–นๆกˆ | FLOPs | ๅ‚ๆ•ฐ้‡ | ้…็ฝฎๆ–‡ไปถ | ckpt | log | +| :----------: | :------------: | :--------------------: | :--------------: | :---------: | :---------: | :-------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------: | :---------------------: | :---------------------: | :--------------: | :---: | :----: | :---------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| 8 | short-side 320 | UniFormerV2-B/16 | clip | - | - | 84.3 | 96.4 | 84.4 | 96.3 | 4 clips x 3 crop | 0.1T | 115M | [config](/configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics400-rgb/uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics400-rgb_20230313-e29fc968.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics400-rgb/uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics400-rgb.log) | +| 8 | short-side 320 | UniFormerV2-B/16 | clip-kinetics710 | - | - | 85.6 | 97.0 | 85.8 | 97.1 | 4 clips x 3 crop | 0.1T | 115M | [config](/configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb_20230313-75be0806.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb.log) | +| 8 | short-side 320 | UniFormerV2-L/14\* | clip-kinetics710 | 88.7 | 98.1 | 88.8 | 98.1 | 88.7 | 98.1 | 4 clips x 3 crop | 0.7T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u8_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics400/uniformerv2-large-p14-res224_clip-kinetics710-pre_u8_kinetics400-rgb_20221219-972ea063.pth) | - | +| 16 | short-side 320 | UniFormerV2-L/14\* | clip-kinetics710 | 89.0 | 98.2 | 89.1 | 98.2 | 89.0 | 98.2 | 4 clips x 3 crop | 1.3T | 354M | 
[config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u16_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics400/uniformerv2-large-p14-res224_clip-kinetics710-pre_u16_kinetics400-rgb_20221219-6dc86d05.pth) | - | +| 32 | short-side 320 | UniFormerV2-L/14\* | clip-kinetics710 | 89.3 | 98.2 | 89.3 | 98.2 | 89.4 | 98.2 | 2 clips x 3 crop | 2.7T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics400/uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics400-rgb_20221219-56a46f64.pth) | - | +| 32 | short-side 320 | UniFormerV2-L/14@336\* | clip-kinetics710 | 89.5 | 98.4 | 89.7 | 98.3 | 89.5 | 98.4 | 2 clips x 3 crop | 6.3T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res336_clip-kinetics710-pre_u32_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics400/uniformerv2-large-p14-res336_clip-kinetics710-pre_u32_kinetics400-rgb_20221219-1dd7650f.pth) | - | + +### Kinetics-600 + +| ๅ‡ๅŒ€้‡‡ๆ ทๅธงๆ•ฐ | ๅˆ†่พจ็Ž‡ | ไธปๅนฒ็ฝ‘็ปœ | ้ข„่ฎญ็ปƒ | top1 ๅ‡†็กฎ็Ž‡ | top5 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/OpenGVLab/UniFormerV2/blob/main/MODEL_ZOO.md) top1 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/OpenGVLab/UniFormerV2/blob/main/MODEL_ZOO.md) top5 ๅ‡†็กฎ็Ž‡ | mm-Kinetics top1 ๅ‡†็กฎ็Ž‡ | mm-Kinetics top5 ๅ‡†็กฎ็Ž‡ | ๆต‹่ฏ•ๆ–นๆกˆ | FLOPs | ๅ‚ๆ•ฐ้‡ | ้…็ฝฎๆ–‡ไปถ | ckpt | log | +| :----------: | :----: | :--------------------: | :--------------: | :---------: | :---------: | :-------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------: | :---------------------: | :---------------------: | :--------------: | :---: | :----: | :---------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| 8 | Raw | UniFormerV2-B/16 | clip-kinetics710 | - | - | 86.1 | 97.2 | 86.4 | 97.3 | 4 clips x 3 crop | 0.1T | 115M | [config](/configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics600-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics600-rgb/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics600-rgb_20230313-544f06f0.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics600-rgb/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics600-rgb.log) | +| 8 | Raw | UniFormerV2-L/14\* | clip-kinetics710 | 89.0 | 98.3 | 89.0 | 98.2 | 87.5 | 98.0 | 4 clips x 3 crop | 0.7T | 354M | 
[config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u8_kinetics600-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics600/uniformerv2-large-p14-res224_clip-kinetics710-pre_u8_kinetics600-rgb_20221219-cf88e4c2.pth) | - | +| 16 | Raw | UniFormerV2-L/14\* | clip-kinetics710 | 89.4 | 98.3 | 89.4 | 98.3 | 87.8 | 98.0 | 4 clips x 3 crop | 1.3T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u16_kinetics600-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics600/uniformerv2-large-p14-res224_clip-kinetics710-pre_u16_kinetics600-rgb_20221219-38ff0e3e.pth) | - | +| 32 | Raw | UniFormerV2-L/14\* | clip-kinetics710 | 89.2 | 98.3 | 89.5 | 98.3 | 87.7 | 98.1 | 2 clips x 3 crop | 2.7T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics600-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics600/uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics600-rgb_20221219-d450d071.pth) | - | +| 32 | Raw | UniFormerV2-L/14@336\* | clip-kinetics710 | 89.8 | 98.5 | 89.9 | 98.5 | 88.8 | 98.3 | 2 clips x 3 crop | 6.3T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res336_clip-kinetics710-pre_u32_kinetics600-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics600/uniformerv2-large-p14-res336_clip-kinetics710-pre_u32_kinetics600-rgb_20221219-f984f5d2.pth) | - | + +### Kinetics-700 + +| ๅ‡ๅŒ€้‡‡ๆ ทๅธงๆ•ฐ | ๅˆ†่พจ็Ž‡ | ไธปๅนฒ็ฝ‘็ปœ | ้ข„่ฎญ็ปƒ | top1 ๅ‡†็กฎ็Ž‡ | top5 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/OpenGVLab/UniFormerV2/blob/main/MODEL_ZOO.md) top1 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/OpenGVLab/UniFormerV2/blob/main/MODEL_ZOO.md) top5 ๅ‡†็กฎ็Ž‡ | mm-Kinetics top1 ๅ‡†็กฎ็Ž‡ | mm-Kinetics top5 ๅ‡†็กฎ็Ž‡ | ๆต‹่ฏ•ๆ–นๆกˆ | FLOPs | ๅ‚ๆ•ฐ้‡ | ้…็ฝฎๆ–‡ไปถ | ckpt | log | +| :----------: | :----: | :--------------------: | :--------------: | :---------: | :---------: | :-------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------: | :---------------------: | :---------------------: | :--------------: | :---: | :----: | :---------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| 8 | Raw | UniFormerV2-B/16 | clip | - | - | 75.8 | 92.8 | 75.9 | 92.9 | 4 clips x 3 crop | 0.1T | 115M | [config](/configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics700-rgb/uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics700-rgb_20230313-f02e48ad.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics700-rgb/uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics700-rgb.log) | +| 8 | Raw | UniFormerV2-B/16 | clip-kinetics710 | - | - | 76.3 | 92.7 | 76.3 | 92.9 | 4 clips x 3 crop | 0.1T | 115M | [config](/configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics700-rgb/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics700-rgb_20230313-69070837.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics700-rgb/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics700-rgb.log) | +| 8 | Raw | UniFormerV2-L/14\* | clip-kinetics710 | 80.8 | 95.2 | 80.8 | 95.4 | 79.4 | 94.8 | 4 clips x 3 crop | 0.7T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u8_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics700/uniformerv2-large-p14-res224_clip-kinetics710-pre_u8_kinetics700-rgb_20221219-bfb9f401.pth) | - | +| 16 | Raw | UniFormerV2-L/14\* | clip-kinetics710 | 81.2 | 95.6 | 81.2 | 95.6 | 79.2 | 95.0 | 4 clips x 3 crop | 1.3T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u16_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics700/uniformerv2-large-p14-res224_clip-kinetics710-pre_u16_kinetics700-rgb_20221219-745209d2.pth) | - | +| 32 | Raw | UniFormerV2-L/14\* | clip-kinetics710 | 81.4 | 95.7 | 81.5 | 95.7 | 79.8 | 95.3 | 2 clips x 3 crop | 2.7T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics700/uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics700-rgb_20221219-eebe7056.pth) | - | +| 32 | Raw | UniFormerV2-L/14@336\* | clip-kinetics710 | 82.1 | 96.0 | 82.1 | 96.1 | 80.6 | 95.6 | 2 clips x 3 crop | 6.3T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res336_clip-kinetics710-pre_u32_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics700/uniformerv2-large-p14-res336_clip-kinetics710-pre_u32_kinetics700-rgb_20221219-95cf9046.pth) | - | + +### MiTv1 + +| ๅ‡ๅŒ€้‡‡ๆ ทๅธงๆ•ฐ | ๅˆ†่พจ็Ž‡ | ไธปๅนฒ็ฝ‘็ปœ | ้ข„่ฎญ็ปƒ | top1 ๅ‡†็กฎ็Ž‡ | top5 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/OpenGVLab/UniFormerV2/blob/main/MODEL_ZOO.md) top1 ๅ‡†็กฎ็Ž‡ | [ๅ‚่€ƒๆ–‡็Œฎ](https://github.com/OpenGVLab/UniFormerV2/blob/main/MODEL_ZOO.md) top5 ๅ‡†็กฎ็Ž‡ | ๆต‹่ฏ•ๆ–นๆกˆ | FLOPs | ๅ‚ๆ•ฐ้‡ | config | ckpt | log | +| :----------: | :----: | :--------------------: | :--------------------------: | :---------: | :---------: | :-------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------: | :--------------: | :---: | :----: | :------------------------------------------------------------------------------------------------------------------------------: | 
:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| 8 | Raw | UniFormerV2-B/16 | clip-kinetics710-kinetics400 | 42.3 | 71.5 | 42.6 | 71.7 | 4 clips x 3 crop | 0.1T | 115M | [config](/configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-kinetics-k400-pre_16xb32-u8_mitv1-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-kinetics-k400-pre_16xb32-u8_mitv1-rgb/uniformerv2-base-p16-res224_clip-kinetics710-kinetics-k400-pre_16xb32-u8_mitv1-rgb_20230313-a6f4a567.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-kinetics-k400-pre_16xb32-u8_mitv1-rgb/uniformerv2-base-p16-res224_clip-kinetics710-kinetics-k400-pre_16xb32-u8_mitv1-rgb.log) | +| 8 | Raw | UniFormerV2-L/14\* | clip-kinetics710-kinetics400 | 47.0 | 76.1 | 47.0 | 76.1 | 4 clips x 3 crop | 0.7T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p16-res224_clip-kinetics710-kinetics-k400-pre_u8_mitv1-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/mitv1/uniformerv2-large-p16-res224_clip-kinetics710-kinetics-k400-pre_u8_mitv1-rgb_20221219-882c0598.pth) | - | +| 8 | Raw | UniFormerV2-L/14@336\* | clip-kinetics710-kinetics400 | 47.7 | 76.8 | 47.8 | 76.0 | 4 clips x 3 crop | 1.6T | 354M | [config](/configs/recognition/uniformerv2/uniformerv2-large-p16-res336_clip-kinetics710-kinetics-k400-pre_u8_mitv1-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/mitv1/uniformerv2-large-p16-res336_clip-kinetics710-kinetics-k400-pre_u8_mitv1-rgb_20221219-9020986e.pth) | - | + +### Kinetics-710 + +| ๅ‡ๅŒ€้‡‡ๆ ทๅธงๆ•ฐ | ๅˆ†่พจ็Ž‡ | ไธปๅนฒ็ฝ‘็ปœ | ้ข„่ฎญ็ปƒ | top1 ๅ‡†็กฎ็Ž‡ | top5 ๅ‡†็กฎ็Ž‡ | config | ckpt | log | +| :----------: | :----: | :--------------------: | :----: | :---------: | :---------: | :-------------------------------------------: | :------------------------------------------: | :-----------------------------------------: | +| 8 | Raw | UniFormerV2-B/16\* | clip | 78.9 | 94.2 | [config](/configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-pre_u8_kinetics710-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-pre_u8_kinetics710-rgb/uniformerv2-base-p16-res224_clip-pre_u8_kinetics710-rgb_20230612-63cdbad9.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-pre_u8_kinetics710-rgb/uniformerv2-base-p16-res224_clip-pre_u8_kinetics710-rgb.log) | +| 8 | Raw | UniFormerV2-L/14\* | clip | - | - | [config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-pre_u8_kinetics710-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics710/uniformerv2-large-p14-res224_clip-pre_u8_kinetics710-rgb_20230612-d002a407.pth) | - | +| 8 | Raw | UniFormerV2-L/14@336\* | clip | - | - | 
[config](/configs/recognition/uniformerv2/uniformerv2-large-p14-res336_clip-pre_u8_kinetics710-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics710/uniformerv2-large-p14-res336_clip-pre_u8_kinetics710-rgb_20230612-d723ddc1.pth) | - | + +ไปฅไธŠๅธฆๆœ‰ * ็š„ๆจกๅž‹ๆ˜ฏ่ฟ็งป่‡ช[UniFormerV2ไป“ๅบ“](https://github.com/OpenGVLab/UniFormerV2/blob/main/MODEL_ZOO.md)๏ผŒๅนถๅœจๆˆ‘ไปฌ็š„ๆ•ฐๆฎไธŠ่ฟ›่กŒไบ†ๆต‹่ฏ•ใ€‚็”ฑไบŽ็ฎ—ๅŠ›้™ๅˆถ๏ผŒๆˆ‘ไปฌไป…ๆ”ฏๆŒๅŸบ็ก€ๆจกๅž‹๏ผˆๅณ UniFormerV2-B/16๏ผ‰่ฎญ็ปƒ้…็ฝฎ็š„ๅฏ้ ๆ€งใ€‚ + +1. "ๅ‚่€ƒๆ–‡็Œฎ"ๅˆ—ไธญ็š„ๆ•ฐๅ€ผๆ˜ฏๅŽŸๅง‹ไป“ๅบ“็š„็ป“ๆžœใ€‚ +2. `top1/5ๅ‡†็กฎ็Ž‡` ไธญ็š„ๆ•ฐๅ€ผๆ˜ฏๅœจไธŽๅŽŸๅง‹ไป“ๅบ“็›ธๅŒ็š„ๆ•ฐๆฎไธŠ่ฟ›่กŒๆต‹่ฏ•ๅพ—ๅˆฐ็š„๏ผŒๅนถไธ”ๅˆ†็ฑปๅ™จ-ๆ ‡็ญพๆ˜ ๅฐ„ไธŽ [UniFormerV2](https://drive.google.com/drive/folders/17VB-XdF3Kfr9ORmnGyXCxTMs86n0L4QL)ไธ€่‡ดใ€‚ +3. "mm-Kinetics" ๅˆ—ไธญ็š„ๆ•ฐๅ€ผๆ˜ฏๅœจ MMAction2 ๆŒๆœ‰็š„ Kinetics ๆ•ฐๆฎ้›†ไธŠ่ฟ›่กŒ็š„ๆต‹่ฏ•็ป“ๆžœ๏ผŒๅ…ถไป– MMAction2 ๆจกๅž‹ไนŸไฝฟ็”จไบ†่ฏฅๆ•ฐๆฎ้›†ใ€‚็”ฑไบŽๅ„ไธช็‰ˆๆœฌ็š„ Kinetics ๆ•ฐๆฎ้›†ไน‹้—ดๅญ˜ๅœจๅทฎๅผ‚๏ผŒ`top1/5ๅ‡†็กฎ็Ž‡` ๅ’Œ `mm-Kinetics top1/5ๅ‡†็กฎ็Ž‡` ไน‹้—ดๅญ˜ๅœจไธ€ไบ›ๅทฎๅผ‚ใ€‚ไธบไบ†ไธŽๅ…ถไป–ๆจกๅž‹่ฟ›่กŒๅ…ฌๅนณๆฏ”่พƒ๏ผŒๆˆ‘ไปฌๅœจ่ฟ™้‡ŒๆŠฅๅ‘Šไบ†ไธคไธช็ป“ๆžœใ€‚่ฏทๆณจๆ„๏ผŒๆˆ‘ไปฌๅชๆŠฅๅ‘ŠๆŽจๆ–ญ็ป“ๆžœ๏ผŒๅ› ไธบ UniFormer ๅ’Œๅ…ถไป–ๆจกๅž‹็š„่ฎญ็ปƒ้›†ไธๅŒ๏ผŒๆ‰€ไปฅ่ฏฅ็ป“ๆžœไฝŽไบŽๅœจไฝœ่€…็‰ˆๆœฌไธŠๆต‹่ฏ•็š„็ป“ๆžœใ€‚ +4. ็”ฑไบŽ Kinetics-400/600/700 ็š„ๅŽŸๅง‹ๆจกๅž‹้‡‡็”จไบ†ไธๅŒ็š„[ๆ ‡็ญพๆ–‡ไปถ](https://drive.google.com/drive/folders/17VB-XdF3Kfr9ORmnGyXCxTMs86n0L4QL)๏ผŒๆˆ‘ไปฌๆ นๆฎๆ ‡็ญพๅ็งฐ็ฎ€ๅ•ๆ˜ ๅฐ„ๆƒ้‡ใ€‚Kinetics-400/600/700็š„ๆ–ฐๆ ‡็ญพๆ˜ ๅฐ„ๅฏไปฅๅœจ[่ฟ™้‡Œ](/tools/data/kinetics)ๆ‰พๅˆฐใ€‚ +5. ็”ฑไบŽ [SlowFast](https://github.com/facebookresearch/SlowFast)ๅ’Œ MMAction2 ไน‹้—ดๅญ˜ๅœจไธ€ไบ›ๅทฎๅผ‚๏ผŒๅฎƒไปฌ็š„ๆ€ง่ƒฝไน‹้—ดๅญ˜ๅœจไธ€ไบ›ๅทฎ่ทใ€‚ +6. 
ๆˆ‘ไปฌไฝฟ็”จKinetics-710่ฟ›่กŒ้ข„่ฎญ็ปƒ๏ผŒ่ฟ™ๆœ‰ๅŠฉไบŽๆ้ซ˜ๅ…ถไป–ๆ•ฐๆฎ้›†็š„ๆ€ง่ƒฝใ€‚ไฝ ๅฏไปฅๅœจ[่ฎบๆ–‡](https://arxiv.org/abs/2211.09552)ไธญๆ‰พๅˆฐๆ›ดๅคš็ป†่Š‚ใ€‚ๆˆ‘ไปฌ่ฟ˜ๆ นๆฎ Kinetics-710 ็š„ๆจกๅž‹ๆƒ้‡่ฟ›่กŒไบ†ๆƒ้‡ๆ˜ ๅฐ„๏ผŒไฝ ๅฏไปฅๅœจ[่ฟ™้‡Œ](/tools/data/kinetics710/label_map_k710.txt)ๆ‰พๅˆฐๆ ‡็ญพๆ˜ ๅฐ„ใ€‚
+
+ๆœ‰ๅ…ณๆ•ฐๆฎๅ‡†ๅค‡็š„ๆ›ดๅคš่ฏฆ็ป†ไฟกๆฏ๏ผŒๅฏไปฅๅ‚่€ƒไปฅไธ‹้“พๆŽฅ๏ผš
+
+- [ๅ‡†ๅค‡ Kinetics ๆ•ฐๆฎ้›†](/tools/data/kinetics/README_zh-CN.md)
+- [ๅ‡†ๅค‡ MIT ๆ•ฐๆฎ้›†](/tools/data/mit/README_zh-CN.md)
+
+## ๅฆ‚ไฝ•ๆต‹่ฏ•
+
+ๆ‚จๅฏไปฅไฝฟ็”จไปฅไธ‹ๅ‘ฝไปคๆฅๆต‹่ฏ•ๆจกๅž‹๏ผš
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+็คบไพ‹๏ผšๅœจ Kinetics-400 ๆ•ฐๆฎ้›†ไธŠๆต‹่ฏ• UniFormerV2-B/16 ๆจกๅž‹๏ผŒๅนถๅฐ†็ป“ๆžœ่ฝฌๅ‚จๅˆฐไธ€ไธชpklๆ–‡ไปถไธญใ€‚
+
+```shell
+python tools/test.py configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_u8_kinetics400-rgb.py \
+    checkpoints/SOME_CHECKPOINT.pth --dump result.pkl
+```
+
+ๆœ‰ๅ…ณๆ›ดๅคš่ฏฆ็ป†ไฟกๆฏ๏ผŒ่ฏทๅ‚่€ƒ[่ฎญ็ปƒๅ’Œๆต‹่ฏ•ๆ•™็จ‹](/docs/zh_cn/user_guides/train_test.md)ไธญ็š„**ๆต‹่ฏ•**้ƒจๅˆ†ใ€‚

From f701d9477523d9c0ba9f2fa701f337f963d577df Mon Sep 17 00:00:00 2001
From: xiaoyi0919 <88712079+xiaoyi0919@users.noreply.github.com>
Date: Tue, 15 Aug 2023 14:32:02 +0800
Subject: [PATCH 07/24] [Feature] Support video retrieval dataset MSVD (#2622)

---
 tools/data/video_retrieval/README.md       | 38 +++++++++++++++
 tools/data/video_retrieval/README_zh-CN.md | 38 +++++++++++++++
 tools/data/video_retrieval/prepare_msvd.py | 55 ++++++++++++++++++++++
 tools/data/video_retrieval/prepare_msvd.sh | 35 ++++++++++++++
 4 files changed, 166 insertions(+)
 create mode 100644 tools/data/video_retrieval/prepare_msvd.py
 create mode 100644 tools/data/video_retrieval/prepare_msvd.sh

diff --git a/tools/data/video_retrieval/README.md b/tools/data/video_retrieval/README.md
index 77f05ddcf7..99a7398c25 100644
--- a/tools/data/video_retrieval/README.md
+++ b/tools/data/video_retrieval/README.md
@@ -14,6 +14,16 @@
 }
 ```

+```BibTeX
+@inproceedings{chen2011collecting,
+  title={Collecting highly parallel data for paraphrase evaluation},
+  author={Chen, David and Dolan, William B},
+  booktitle={ACL},
+  pages={190--200},
+  year={2011}
+}
+```
+
 Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/video_retrieval/`.

 ## Preparing MSRVTT dataset
@@ -43,3 +53,31 @@ mmaction2
 โ”‚ โ”‚ โ”œโ”€โ”€ ...
 โ”‚ โ”‚ โ””โ”€โ”€ video9999.mp4
 ```
+
+## Preparing MSVD dataset
+
+For basic dataset information, you can refer to the MSVD dataset [website](https://www.cs.utexas.edu/users/ml/clamp/videoDescription/). Run the following command to prepare the MSVD dataset:
+
+```shell
+bash prepare_msvd.sh
+```
+
+After preparation, the folder structure will look like:
+
+```
+mmaction2
+โ”œโ”€โ”€ mmaction
+โ”œโ”€โ”€ tools
+โ”œโ”€โ”€ configs
+โ”œโ”€โ”€ data
+โ”‚ โ”œโ”€โ”€ video_retrieval
+โ”‚ โ”‚ โ””โ”€โ”€ msvd
+โ”‚ โ”‚ โ”œโ”€โ”€ train.json
+โ”‚ โ”‚ โ”œโ”€โ”€ test.json
+โ”‚ โ”‚ โ”œโ”€โ”€ val.json
+โ”‚ โ”‚ โ””โ”€โ”€ videos
+โ”‚ โ”‚ โ”œโ”€โ”€ xxx.avi
+โ”‚ โ”‚ โ”œโ”€โ”€ xxx.avi
+โ”‚ โ”‚ โ”œโ”€โ”€ ...
+โ”‚ โ”‚ โ””โ”€โ”€ xxx.avi
+```
diff --git a/tools/data/video_retrieval/README_zh-CN.md b/tools/data/video_retrieval/README_zh-CN.md
index a4cd194f58..1d4374daea 100644
--- a/tools/data/video_retrieval/README_zh-CN.md
+++ b/tools/data/video_retrieval/README_zh-CN.md
@@ -14,6 +14,16 @@
 }
 ```

+```BibTeX
+@inproceedings{chen2011collecting,
+  title={Collecting highly parallel data for paraphrase evaluation},
+  author={Chen, David and Dolan, William B},
+  booktitle={ACL},
+  pages={190--200},
+  year={2011}
+}
+```
+
 ๅœจๆ•ฐๆฎ้›†ๅ‡†ๅค‡ๅ‰๏ผŒ่ฏท็กฎไฟๅ‘ฝไปค่กŒๅฝ“ๅ‰่ทฏๅพ„ไธบ `$MMACTION2/tools/data/video_retrieval/`ใ€‚

 ## ๅ‡†ๅค‡ MSRVTT ๆ•ฐๆฎ้›†
@@ -43,3 +53,31 @@ mmaction2
 โ”‚ โ”‚ โ”œโ”€โ”€ ...
 โ”‚ โ”‚ โ””โ”€โ”€ video9999.mp4
 ```
+
+## ๅ‡†ๅค‡ MSVD ๆ•ฐๆฎ้›†
+
+็”จๆˆทๅฏๅ‚่€ƒ่ฏฅๆ•ฐๆฎ้›†็š„[ๅฎ˜็ฝ‘](https://www.cs.utexas.edu/users/ml/clamp/videoDescription/)๏ผŒไปฅ่Žทๅ–ๆ•ฐๆฎ้›†็›ธๅ…ณ็š„ๅŸบๆœฌไฟกๆฏใ€‚่ฟ่กŒไธ‹้ข็š„ๅ‘ฝไปคๅ‡†ๅค‡ MSVD ๆ•ฐๆฎ้›†๏ผš
+
+```shell
+bash prepare_msvd.sh
+```
+
+ๅฎŒๆˆไธŠ่ฟฐๅ‡†ๅค‡ๆญฅ้ชคๅŽ๏ผŒๆ–‡ไปถ็›ฎๅฝ•ๅฆ‚ไธ‹๏ผš
+
+```
+mmaction2
+โ”œโ”€โ”€ mmaction
+โ”œโ”€โ”€ tools
+โ”œโ”€โ”€ configs
+โ”œโ”€โ”€ data
+โ”‚ โ”œโ”€โ”€ video_retrieval
+โ”‚ โ”‚ โ””โ”€โ”€ msvd
+โ”‚ โ”‚ โ”œโ”€โ”€ train.json
+โ”‚ โ”‚ โ”œโ”€โ”€ test.json
+โ”‚ โ”‚ โ”œโ”€โ”€ val.json
+โ”‚ โ”‚ โ””โ”€โ”€ videos
+โ”‚ โ”‚ โ”œโ”€โ”€ xxx.avi
+โ”‚ โ”‚ โ”œโ”€โ”€ xxx.avi
+โ”‚ โ”‚ โ”œโ”€โ”€ ...
+โ”‚ โ”‚ โ””โ”€โ”€ xxx.avi
+```
diff --git a/tools/data/video_retrieval/prepare_msvd.py b/tools/data/video_retrieval/prepare_msvd.py
new file mode 100644
index 0000000000..b8cc4377cf
--- /dev/null
+++ b/tools/data/video_retrieval/prepare_msvd.py
@@ -0,0 +1,55 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import json
+import os.path as osp
+import pickle
+
+DATA_DIR = '../../../data/video_retrieval/msvd'
+SUFFIX = '.avi'
+
+data_path = osp.join(DATA_DIR, 'msvd_data/raw-captions.pkl')
+train_txt_path = osp.join(DATA_DIR, 'msvd_data/train_list.txt')
+test_txt_path = osp.join(DATA_DIR, 'msvd_data/test_list.txt')
+val_txt_path = osp.join(DATA_DIR, 'msvd_data/val_list.txt')
+train_json_path = osp.join(DATA_DIR, 'train.json')
+test_json_path = osp.join(DATA_DIR, 'test.json')
+val_json_path = osp.join(DATA_DIR, 'val.json')
+
+with open(data_path, 'rb') as F:
+    data = pickle.load(F)
+
+video_dict = {}
+for one_data in data:
+    caption = data[one_data]
+    if one_data not in video_dict:
+        video_dict[one_data] = []
+    for cap in caption:
+        video_dict[one_data].append(' '.join(cap))
+
+with open(train_txt_path, 'r') as f:
+    train_avi = f.readlines()
+
+train_avi_list = {}
+for video in train_avi:
+    train_avi_list[video.strip() + SUFFIX] = video_dict[video.strip()]
+
+with open(train_json_path, 'w') as f:
+    json.dump(train_avi_list, f)
+
+with open(test_txt_path, 'r') as f:
+    test_avi = f.readlines()
+
+test_avi_list = {}
+for video in test_avi:
+    test_avi_list[video.strip() + SUFFIX] = video_dict[video.strip()]
+with open(test_json_path, 'w') as f:
+    json.dump(test_avi_list, f)
+
+with open(val_txt_path, 'r') as f:
+    val_avi = f.readlines()
+
+val_avi_list = {}
+for video in val_avi:
+    val_avi_list[video.strip() + SUFFIX] = video_dict[video.strip()]
+
+with open(val_json_path, 'w') as f:
+    json.dump(val_avi_list, f)
diff --git a/tools/data/video_retrieval/prepare_msvd.sh b/tools/data/video_retrieval/prepare_msvd.sh
new file mode 100644
index 0000000000..5f804fe8bf
--- /dev/null
+++ b/tools/data/video_retrieval/prepare_msvd.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+DATA_DIR="../../../data/video_retrieval/msvd"
+mkdir -p ${DATA_DIR}
+
+
+if
[ -f "msvd_data.zip" ]; then + echo "msvd_data.zip exists, skip downloading!" +else + echo "Downloading msvd_data.zip." + wget https://github.com/ArrowLuo/CLIP4Clip/releases/download/v0.0/msvd_data.zip +fi + +echo "Processing annotations started." +unzip -q msvd_data.zip -d ${DATA_DIR} +python prepare_msvd.py +echo "Processing annotations completed." + +if [ -f "YouTubeClips.tar" ]; then + echo "YouTubeClips.tar exists, skip downloading!" +else + echo "Downloading YouTubeClips.tar." + wget https://www.cs.utexas.edu/users/ml/clamp/videoDescription/YouTubeClips.tar +fi + +echo "Processing videos started." +tar -xf YouTubeClips.tar -C ${DATA_DIR} +mkdir -p "${DATA_DIR}/videos/" && find "${DATA_DIR}/YouTubeClips" -name "*.avi" -exec mv {} "${DATA_DIR}/videos/" \; +echo "Processing videos completed." + +rm -rf "${DATA_DIR}/YouTubeClips" +rm -rf "${DATA_DIR}/msvd_data" +rm msvd_data.zip +rm YouTubeClips.tar +echo "The preparation of the msvd dataset has been successfully completed." From 96d20d38c91b3609bbb7ed4d99a94285c0a48c5b Mon Sep 17 00:00:00 2001 From: makecent <42603768+makecent@users.noreply.github.com> Date: Tue, 15 Aug 2023 15:58:06 +0800 Subject: [PATCH 08/24] [Enhance] Support 2D&3D Optical Flow Training (#2631) --- ..._r50_8xb16-16x4x1-256e_kinetics400-flow.py | 146 ++++++++++++++++++ ...d-r50_8xb32_5x1x3-110e_kinetics400-flow.py | 141 +++++++++++++++++ mmaction/datasets/transforms/formatting.py | 40 ++--- mmaction/datasets/transforms/loading.py | 8 +- tests/datasets/transforms/test_formating.py | 16 +- tests/datasets/transforms/test_loading.py | 8 +- 6 files changed, 314 insertions(+), 45 deletions(-) create mode 100644 configs/recognition/slowonly/slowonly_r50_8xb16-16x4x1-256e_kinetics400-flow.py create mode 100644 configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32_5x1x3-110e_kinetics400-flow.py diff --git a/configs/recognition/slowonly/slowonly_r50_8xb16-16x4x1-256e_kinetics400-flow.py b/configs/recognition/slowonly/slowonly_r50_8xb16-16x4x1-256e_kinetics400-flow.py new file mode 100644 index 0000000000..92221d9e97 --- /dev/null +++ b/configs/recognition/slowonly/slowonly_r50_8xb16-16x4x1-256e_kinetics400-flow.py @@ -0,0 +1,146 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + in_channels=2, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5, + average_clips='prob'), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[128, 128], + std=[128, 128], + format_shape='NCTHW')) + +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_flow.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_flow.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_flow.txt' +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type='SampleFrames', clip_len=16, frame_interval=4, num_clips=1), + dict(type='RawFrameDecode', **file_client_args), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + 
+val_pipeline = [ + dict( + type='SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=2, + test_mode=True), + dict(type='RawFrameDecode', **file_client_args), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode', **file_client_args), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + data_prefix=dict(img=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + data_prefix=dict(img=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + data_prefix=dict(img=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=256, val_begin=1, val_interval=8) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning policy +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=True, begin=0, end=34), + dict( + type='CosineAnnealingLR', + T_max=222, + eta_min=0, + by_epoch=True, + begin=34, + end=256) +] + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.2, momentum=0.9, weight_decay=1e-4), + clip_grad=dict(max_norm=40, norm_type=2)) + +# runtime settings +default_hooks = dict(checkpoint=dict(interval=8, max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32_5x1x3-110e_kinetics400-flow.py b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32_5x1x3-110e_kinetics400-flow.py new file mode 100644 index 0000000000..a25eb31334 --- /dev/null +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32_5x1x3-110e_kinetics400-flow.py @@ -0,0 +1,141 @@ +_base_ = '../../_base_/default_runtime.py' + +clip_len = 5 + +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='https://download.pytorch.org/models/resnet50-11ad3fa6.pth', + depth=50, + in_channels=2 * clip_len, # ``in_channels`` should be 2 * clip_len + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01, + average_clips='prob'), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[128, 128] * clip_len, # ``in_channels`` should be 2 * clip_len + std=[128, 128] * clip_len, # ``in_channels`` should be 2 * clip_len + format_shape='NCHW')) + +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_flow.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_flow.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_flow.txt' +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict( + type='SampleFrames', clip_len=clip_len, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode', **file_client_args), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=clip_len, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode', **file_client_args), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=clip_len, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + data_prefix=dict(img=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + data_prefix=dict(img=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + 
data_prefix=dict(img=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=110, val_begin=1, val_interval=5) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=40, norm_type=2)) + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=110, + by_epoch=True, + milestones=[70, 100], + gamma=0.1) +] + +default_hooks = dict(checkpoint=dict(interval=5, max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (32 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=256) diff --git a/mmaction/datasets/transforms/formatting.py b/mmaction/datasets/transforms/formatting.py index 9b9cb375a9..168509be30 100644 --- a/mmaction/datasets/transforms/formatting.py +++ b/mmaction/datasets/transforms/formatting.py @@ -204,16 +204,20 @@ class FormatShape(BaseTransform): """Format final imgs shape to the given input_format. Required keys: + - imgs (optional) - heatmap_imgs (optional) + - modality (optional) - num_clips - clip_len Modified Keys: - - imgs (optional) - - input_shape (optional) + + - imgs Added Keys: + + - input_shape - heatmap_input_shape (optional) Args: @@ -227,7 +231,7 @@ def __init__(self, input_format: str, collapse: bool = False) -> None: self.input_format = input_format self.collapse = collapse if self.input_format not in [ - 'NCTHW', 'NCHW', 'NCHW_Flow', 'NCTHW_Heatmap', 'NPTCHW' + 'NCTHW', 'NCHW', 'NCTHW_Heatmap', 'NPTCHW' ]: raise ValueError( f'The input format {self.input_format} is invalid.') @@ -300,36 +304,14 @@ def transform(self, results: Dict) -> Dict: elif self.input_format == 'NCHW': imgs = results['imgs'] imgs = np.transpose(imgs, (0, 3, 1, 2)) + if 'modality' in results and results['modality'] == 'Flow': + clip_len = results['clip_len'] + imgs = imgs.reshape((-1, clip_len * imgs.shape[1]) + + imgs.shape[2:]) # M x C x H x W results['imgs'] = imgs results['input_shape'] = imgs.shape - elif self.input_format == 'NCHW_Flow': - num_imgs = len(results['imgs']) - assert num_imgs % 2 == 0 - n = num_imgs // 2 - h, w = results['imgs'][0].shape - x_flow = np.empty((n, h, w), dtype=np.float32) - y_flow = np.empty((n, h, w), dtype=np.float32) - for i in range(n): - x_flow[i] = results['imgs'][2 * i] - y_flow[i] = results['imgs'][2 * i + 1] - imgs = np.stack([x_flow, y_flow], axis=-1) - - num_clips = results['num_clips'] - clip_len = results['clip_len'] - imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:]) - # N_crops x N_clips x T x H x W x C - imgs = np.transpose(imgs, (0, 1, 2, 5, 3, 4)) - # N_crops x N_clips x T x C x H x W - imgs = imgs.reshape((-1, imgs.shape[2] * imgs.shape[3]) + - imgs.shape[4:]) - # M' x C' x H x W - # M' = N_crops x N_clips - # C' = T x C - results['imgs'] = imgs - results['input_shape'] = imgs.shape - elif self.input_format == 'NPTCHW': num_proposals = results['num_proposals'] num_clips = results['num_clips'] diff --git a/mmaction/datasets/transforms/loading.py b/mmaction/datasets/transforms/loading.py index 22070371a1..8d789ab4c3 100644 --- a/mmaction/datasets/transforms/loading.py +++ b/mmaction/datasets/transforms/loading.py @@ -1418,11 +1418,7 @@ def transform(self, results: dict) -> dict: for i, frame_idx in 
enumerate(results['frame_inds']): # Avoid loading duplicated frames if frame_idx in cache: - if modality == 'RGB': - imgs.append(cp.deepcopy(imgs[cache[frame_idx]])) - else: - imgs.append(cp.deepcopy(imgs[2 * cache[frame_idx]])) - imgs.append(cp.deepcopy(imgs[2 * cache[frame_idx] + 1])) + imgs.append(cp.deepcopy(imgs[cache[frame_idx]])) continue else: cache[frame_idx] = i @@ -1443,7 +1439,7 @@ def transform(self, results: dict) -> dict: x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale') y_img_bytes = self.file_client.get(y_filepath) y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale') - imgs.extend([x_frame, y_frame]) + imgs.append(np.stack([x_frame, y_frame], axis=-1)) else: raise NotImplementedError diff --git a/tests/datasets/transforms/test_formating.py b/tests/datasets/transforms/test_formating.py index 4668732746..e12a1a95d7 100644 --- a/tests/datasets/transforms/test_formating.py +++ b/tests/datasets/transforms/test_formating.py @@ -191,12 +191,21 @@ def test_format_shape(): # invalid input format FormatShape('NHWC') - # 'NCHW' input format + # 'NCHW' input format (RGB Modality) results = dict( imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3) format_shape = FormatShape('NCHW') assert format_shape(results)['input_shape'] == (3, 3, 224, 224) + # `NCHW` input format (Flow Modality) + results = dict( + imgs=np.random.randn(3, 224, 224, 2), + num_clips=1, + clip_len=3, + modality='Flow') + format_shape = FormatShape('NCHW') + assert format_shape(results)['input_shape'] == (1, 6, 224, 224) + # `NCTHW` input format with num_clips=1, clip_len=3 results = dict( imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3) @@ -229,11 +238,6 @@ def test_format_shape(): format_shape = FormatShape('NCTHW_Heatmap') assert format_shape(results)['input_shape'] == (2, 17, 6, 56, 56) - # `NCHW_Flow` input format - results = dict(imgs=np.random.randn(6, 224, 224), num_clips=1, clip_len=3) - format_shape = FormatShape('NCHW_Flow') - assert format_shape(results)['input_shape'] == (1, 6, 224, 224) - # `NPTCHW` input format results = dict( imgs=np.random.randn(72, 224, 224, 3), diff --git a/tests/datasets/transforms/test_loading.py b/tests/datasets/transforms/test_loading.py index ee2cc64717..888c993fd5 100644 --- a/tests/datasets/transforms/test_loading.py +++ b/tests/datasets/transforms/test_loading.py @@ -486,8 +486,8 @@ def test_rawframe_decode(self): frame_selector = RawFrameDecode(io_backend='disk') results = frame_selector(inputs) assert assert_dict_has_keys(results, target_keys) - assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2, - 240, 320) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 2) assert results['original_shape'] == (240, 320) # test frame selector with 1 dim input for flow images @@ -496,8 +496,8 @@ def test_rawframe_decode(self): frame_selector = RawFrameDecode(io_backend='disk') results = frame_selector(inputs) assert assert_dict_has_keys(results, target_keys) - assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2, - 240, 320) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 2) assert results['original_shape'] == (240, 320) return From c164eaf0a1c691d45ef01adf42357cb62d2253d0 Mon Sep 17 00:00:00 2001 From: LinXiaoZheng <90811472+Zheng-LinXiao@users.noreply.github.com> Date: Mon, 21 Aug 2023 17:16:26 +0800 Subject: [PATCH 09/24] [Docs] Update Audio Docs (#2645) --- docs/en/user_guides/prepare_dataset.md | 44 ++++++++++++++++++++++- 
docs/zh_cn/user_guides/prepare_dataset.md | 44 ++++++++++++++++++++++
 2 files changed, 86 insertions(+), 2 deletions(-)

diff --git a/docs/en/user_guides/prepare_dataset.md b/docs/en/user_guides/prepare_dataset.md
index c13d448106..35f2aacc5f 100644
--- a/docs/en/user_guides/prepare_dataset.md
+++ b/docs/en/user_guides/prepare_dataset.md
@@ -8,6 +8,7 @@ MMAction2 supports many existing datasets. In this chapter, we will lead you to
 - [Use a custom dataset](#use-a-custom-dataset)
   - [Action Recognition](#action-recognition)
   - [Skeleton-based Action Recognition](#skeleton-based-action-recognition)
+  - [Audio-based Action Recognition](#audio-based-action-recognition)
   - [Spatio-temporal Action Detection](#spatio-temporal-action-detection)
   - [Temporal Action Localization](#temporal-action-localization)
 - [Use mixed datasets for training](#use-mixed-datasets-for-training)
@@ -24,7 +25,7 @@ To make video decoding faster, we support several efficient video loading librar

 ## Use built-in datasets

-MMAction2 already supports many datasets, we provide shell scripts for data preparation under the path `$MMACTION2/tools/data/`, please refer to [supported datasets](../datasetzoo_satatistics.md) for details to prepare specific datasets.
+MMAction2 already supports many datasets, we provide shell scripts for data preparation under the path `$MMACTION2/tools/data/`, please refer to [supported datasets](https://mmaction2.readthedocs.io/en/latest/datasetzoo_statistics.html) for details to prepare specific datasets.

 ## Use a custom dataset

@@ -32,6 +33,7 @@ The simplest way is to convert your dataset to existing dataset formats:

 - `RawFrameDataset` and `VideoDataset` for [Action Recognition](#action-recognition)
 - `PoseDataset` for [Skeleton-based Action Recognition](#skeleton-based-action-recognition)
+- `AudioDataset` for [Audio-based Action Recognition](#audio-based-action-recognition)
 - `AVADataset` for [Spatio-temporal Action Detection](#spatio-temporal-action-detection)
 - `ActivityNetDataset` for [Temporal Action Localization](#temporal-action-localization)

@@ -172,6 +174,46 @@ The task recognizes the action class based on the skeleton sequence (time sequen

 Support other keypoint formats needs further modification, please refer to [customize dataset](../advanced_guides/customize_dataset.md).

+### Audio-based Action Recognition
+
+MMAction2 provides support for audio-based action recognition tasks utilizing the `AudioDataset`. This task employs mel spectrogram features as input. An example annotation file format is as follows:
+
+```
+ihWykL5mYRI.npy 300 153
+lumzQD42AN8.npy 240 321
+sWFRmD9Of4s.npy 250 250
+w_IpfgRsBVA.npy 300 356
+```
+
+Each line represents a training sample. Taking the first line as an example, `ihWykL5mYRI.npy` corresponds to the filename of the mel spectrogram feature. The value `300` represents the total number of frames of the original video corresponding to this mel spectrogram feature, and `153` denotes the class label. We take the following two steps to prepare the mel spectrogram feature data:
+
+First, extract the audio files from the videos:
+
+```shell
+cd $MMACTION2
+python tools/data/extract_audio.py ${ROOT} ${DST_ROOT} [--ext ${EXT}] [--num-workers ${N_WORKERS}] \
+    [--level ${LEVEL}]
+```
+
+- `ROOT`: The root directory of the videos.
+- `DST_ROOT`: The destination root directory of the audios.
+- `EXT`: Extension of the video files. e.g., `mp4`.
+- `N_WORKERS`: Number of processes to be used.
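The annotation format documented above is plain whitespace-separated text (`<feature_file> <total_frames> <label>`), so it can be sanity-checked with a few lines of Python before training. The sketch below is illustrative only and not part of MMAction2 or of this patch; the file paths are placeholders, and the feature array shape depends on the settings used by `build_audio_features.py`.

```python
# Illustrative sketch only (not MMAction2 code): parse an AudioDataset-style
# annotation file and load one mel spectrogram feature. Paths are placeholders.
import numpy as np


def load_audio_annotations(ann_file):
    """Return a list of (feature_filename, total_frames, label) tuples."""
    samples = []
    with open(ann_file) as f:
        for line in f:
            name, total_frames, label = line.split()
            samples.append((name, int(total_frames), int(label)))
    return samples


samples = load_audio_annotations('path/to/audio_feature_annotations.txt')
name, total_frames, label = samples[0]
# The exact shape depends on the extraction settings used by
# tools/data/build_audio_features.py.
feature = np.load(f'path/to/audio_features/{name}')
print(name, total_frames, label, feature.shape)
```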
+
+Next, offline generate the `mel spectrogram features` from the audios:
+
+```shell
+cd $MMACTION2
+python tools/data/build_audio_features.py ${AUDIO_HOME_PATH} ${SPECTROGRAM_SAVE_PATH} [--level ${LEVEL}] \
+    [--ext $EXT] [--num-workers $N_WORKERS] [--part $PART]
+```
+
+- `AUDIO_HOME_PATH`: The root directory of the audio files.
+- `SPECTROGRAM_SAVE_PATH`: The destination root directory of the audio features.
+- `EXT`: Extension of the audio files. e.g., `m4a`.
+- `N_WORKERS`: Number of processes to be used.
+- `PART`: Determines how many parts to split the files into and which part to run. e.g., `2/5` means splitting all files into 5 parts and executing the 2nd part. This is useful if you have several machines.
+
 ### Spatio-temporal Action Detection

 MMAction2 supports the task based on `AVADataset`. The annotation contains groundtruth bbox and proposal bbox.

diff --git a/docs/zh_cn/user_guides/prepare_dataset.md b/docs/zh_cn/user_guides/prepare_dataset.md
index b8cdfee69b..44348580bc 100644
--- a/docs/zh_cn/user_guides/prepare_dataset.md
+++ b/docs/zh_cn/user_guides/prepare_dataset.md
@@ -8,6 +8,7 @@ MMAction2 ๆ”ฏๆŒ่ฎธๅคš็Žฐๆœ‰็š„ๆ•ฐๆฎ้›†ใ€‚ๅœจๆœฌ็ซ ไธญ๏ผŒๆˆ‘ไปฌๅฐ†ๅผ•ๅฏผๆ‚จๅ‡†
 - [ไฝฟ็”จ่‡ชๅฎšไน‰ๆ•ฐๆฎ้›†](#ไฝฟ็”จ่‡ชๅฎšไน‰ๆ•ฐๆฎ้›†)
   - [ๅŠจไฝœ่ฏ†ๅˆซ](#ๅŠจไฝœ่ฏ†ๅˆซ)
   - [ๅŸบไบŽ้ชจ้ชผ็š„ๅŠจไฝœ่ฏ†ๅˆซ](#ๅŸบไบŽ้ชจ้ชผ็š„ๅŠจไฝœ่ฏ†ๅˆซ)
+  - [ๅŸบไบŽ้Ÿณ้ข‘็š„ๅŠจไฝœ่ฏ†ๅˆซ](#ๅŸบไบŽ้Ÿณ้ข‘็š„ๅŠจไฝœ่ฏ†ๅˆซ)
   - [ๆ—ถ็ฉบๅŠจไฝœๆฃ€ๆต‹](#ๆ—ถ็ฉบๅŠจไฝœๆฃ€ๆต‹)
   - [ๆ—ถๅบๅŠจไฝœๅฎšไฝ](#ๆ—ถๅบๅŠจไฝœๅฎšไฝ)
 - [ไฝฟ็”จๆททๅˆๆ•ฐๆฎ้›†่ฟ›่กŒ่ฎญ็ปƒ](#ไฝฟ็”จๆททๅˆๆ•ฐๆฎ้›†่ฟ›่กŒ่ฎญ็ปƒ)
@@ -20,7 +21,7 @@ MMAction2 ๆ”ฏๆŒไธค็ง็ฑปๅž‹็š„ๆ•ฐๆฎๆ ผๅผ๏ผšๅŽŸๅง‹ๅธงๅ’Œ่ง†้ข‘ใ€‚ๅ‰่€…ๅœจไน‹

 ## ไฝฟ็”จๅ†…็ฝฎๆ•ฐๆฎ้›†

-MMAction2 ๅทฒ็ปๆ”ฏๆŒ่ฎธๅคšๆ•ฐๆฎ้›†๏ผŒๆˆ‘ไปฌๅœจ่ทฏๅพ„ `$MMACTION2/tools/data/` ไธ‹ๆไพ›ไบ†็”จไบŽๆ•ฐๆฎๅ‡†ๅค‡็š„ shell ่„šๆœฌ๏ผŒ่ฏทๅ‚่€ƒ[ๆ”ฏๆŒ็š„ๆ•ฐๆฎ้›†](../datasetzoo_satatistics.md)ไปฅ่Žทๅ–ๅ‡†ๅค‡็‰นๅฎšๆ•ฐๆฎ้›†็š„่ฏฆ็ป†ไฟกๆฏใ€‚
+MMAction2 ๅทฒ็ปๆ”ฏๆŒ่ฎธๅคšๆ•ฐๆฎ้›†๏ผŒๆˆ‘ไปฌๅœจ่ทฏๅพ„ `$MMACTION2/tools/data/` ไธ‹ๆไพ›ไบ†็”จไบŽๆ•ฐๆฎๅ‡†ๅค‡็š„ shell ่„šๆœฌ๏ผŒ่ฏทๅ‚่€ƒ[ๆ”ฏๆŒ็š„ๆ•ฐๆฎ้›†](https://mmaction2.readthedocs.io/zh_CN/latest/datasetzoo_statistics.html)ไปฅ่Žทๅ–ๅ‡†ๅค‡็‰นๅฎšๆ•ฐๆฎ้›†็š„่ฏฆ็ป†ไฟกๆฏใ€‚

 ## ไฝฟ็”จ่‡ชๅฎšไน‰ๆ•ฐๆฎ้›†

@@ -28,6 +29,7 @@ MMAction2 ๅทฒ็ปๆ”ฏๆŒ่ฎธๅคšๆ•ฐๆฎ้›†๏ผŒๆˆ‘ไปฌๅœจ่ทฏๅพ„ `$MMACTION2/tools/data/`
 - `RawFrameDataset` ๅ’Œ `VideoDataset` ็”จไบŽ[ๅŠจไฝœ่ฏ†ๅˆซ](#ๅŠจไฝœ่ฏ†ๅˆซ)
 - `PoseDataset` ็”จไบŽ[ๅŸบไบŽ้ชจ้ชผ็š„ๅŠจไฝœ่ฏ†ๅˆซ](#ๅŸบไบŽ้ชจ้ชผ็š„ๅŠจไฝœ่ฏ†ๅˆซ)
+- `AudioDataset` ็”จไบŽ[ๅŸบไบŽ้Ÿณ้ข‘็š„ๅŠจไฝœ่ฏ†ๅˆซ](#ๅŸบไบŽ้Ÿณ้ข‘็š„ๅŠจไฝœ่ฏ†ๅˆซ)
 - `AVADataset` ็”จไบŽ[ๆ—ถ็ฉบๅŠจไฝœๆฃ€ๆต‹](#ๆ—ถ็ฉบๅŠจไฝœๆฃ€ๆต‹)
 - `ActivityNetDataset` ็”จไบŽ[ๆ—ถๅบๅŠจไฝœๅฎšไฝ](#ๆ—ถๅบๅŠจไฝœๅฎšไฝ)

@@ -163,6 +165,46 @@ data = dict(

 ๆ”ฏๆŒๅ…ถไป–ๅ…ณ้”ฎ็‚นๆ ผๅผ้œ€่ฆ่ฟ›่กŒ่ฟ›ไธ€ๆญฅไฟฎๆ”น๏ผŒ่ฏทๅ‚่€ƒ[่‡ชๅฎšไน‰ๆ•ฐๆฎ้›†](../advanced_guides/customize_dataset.md)ใ€‚

+### ๅŸบไบŽ้Ÿณ้ข‘็š„ๅŠจไฝœ่ฏ†ๅˆซ
+
+MMAction2 ๆ”ฏๆŒๅŸบไบŽ `AudioDataset` ็š„้Ÿณ้ข‘ๅŠจไฝœ่ฏ†ๅˆซไปปๅŠกใ€‚่ฏฅไปปๅŠกไฝฟ็”จๆข…ๅฐ”้ข‘่ฐฑ็‰นๅพไฝœไธบ่พ“ๅ…ฅ๏ผŒๆณจ้‡Šๆ–‡ไปถๆ ผๅผ็คบไพ‹ๅฆ‚ไธ‹๏ผš
+
+```
+ihWykL5mYRI.npy 300 153
+lumzQD42AN8.npy 240 321
+sWFRmD9Of4s.npy 250 250
+w_IpfgRsBVA.npy 300 356
+```
+
+ๆฏไธ€่กŒไปฃ่กจไธ€ไธช่ฎญ็ปƒๆ ทๆœฌ๏ผŒไปฅ็ฌฌไธ€่กŒไธบไพ‹๏ผŒ`ihWykL5mYRI.npy` ไธบๆข…ๅฐ”้ข‘่ฐฑ็‰นๅพ็š„ๆ–‡ไปถๅ๏ผŒ`300` ไธบ่ฏฅๆข…ๅฐ”้ข‘่ฐฑ็‰นๅพๆ–‡ไปถๅฏนๅบ”็š„ๅŽŸ่ง†้ข‘ๆ–‡ไปถ็š„ๆ€ปๅธงๆ•ฐ๏ผŒ`153` ไธบ็ฑปๅˆซๆ ‡็ญพใ€‚ๆˆ‘ไปฌๅˆ†ไปฅไธ‹ไธคๆญฅ็”Ÿๆˆๆ‰€้œ€็š„ๆข…ๅฐ”้ข‘่ฐฑ็‰นๅพๆ•ฐๆฎ๏ผš
+
+้ฆ–ๅ…ˆ๏ผŒไปŽ่ง†้ข‘ๆ–‡ไปถไธญๆๅ–`้Ÿณ้ข‘ๆ–‡ไปถ`๏ผš
+
+```
+cd $MMACTION2
+python
tools/data/extract_audio.py ${ROOT} ${DST_ROOT} [--ext ${EXT}] [--num-workers ${N_WORKERS}] \ + [--level ${LEVEL}] +``` + +- `ROOT`: ่ง†้ข‘็š„ๆ น็›ฎๅฝ•ใ€‚ +- `DST_ROOT`: ๅญ˜ๆ”พ็”Ÿๆˆ้Ÿณ้ข‘็š„ๆ น็›ฎๅฝ•ใ€‚ +- `EXT`: ่ง†้ข‘็š„ๅŽ็ผ€ๅ๏ผŒๅฆ‚ `mp4`ใ€‚ +- `N_WORKERS`: ไฝฟ็”จ็š„่ฟ›็จ‹ๆ•ฐ้‡ใ€‚ + +ไธ‹ไธ€ๆญฅ๏ผŒไปŽ้Ÿณ้ข‘ๆ–‡ไปถ็”Ÿๆˆ`ๆข…ๅฐ”้ข‘่ฐฑ็‰นๅพ`: + +``` +cd $MMACTION2 +python tools/data/build_audio_features.py ${AUDIO_HOME_PATH} ${SPECTROGRAM_SAVE_PATH} [--level ${LEVEL}] \ + [--ext $EXT] [--num-workers $N_WORKERS] [--part $PART] +``` + +- `AUDIO_HOME_PATH`: ้Ÿณ้ข‘ๆ–‡ไปถ็š„ๆ น็›ฎๅฝ•ใ€‚ +- `SPECTROGRAM_SAVE_PATH`: ๅญ˜ๆ”พ็”Ÿๆˆ้Ÿณ้ข‘็‰นๅพ็š„ๆ น็›ฎๅฝ•ใ€‚ +- `EXT`: ้Ÿณ้ข‘็š„ๅŽ็ผ€ๅ๏ผŒๅฆ‚ `m4a`ใ€‚ +- `N_WORKERS`: ไฝฟ็”จ็š„่ฟ›็จ‹ๆ•ฐ้‡ใ€‚ +- `PART`: ๅฐ†ๅฎŒๆ•ด็š„่งฃ็ ไปปๅŠกๅˆ†ไธบๅ‡ ้ƒจๅˆ†ๅนถๆ‰ง่กŒๅ…ถไธญไธ€ไปฝใ€‚ๅฆ‚ `2/5` ่กจ็คบๅฐ†ๆ‰€ๆœ‰ๅพ…่งฃ็ ๆ•ฐๆฎๅˆ†ๆˆ 5 ไปฝ๏ผŒๅนถๅฏนๅ…ถไธญ็š„็ฌฌ 2 ไปฝ่ฟ›่กŒ่งฃ็ ใ€‚่ฟ™ไธ€้€‰้กนๅœจ็”จๆˆทๆœ‰ๅคšๅฐๆœบๅ™จๆ—ถๅ‘ๆŒฅไฝœ็”จใ€‚ + ### ๆ—ถ็ฉบๅŠจไฝœๆฃ€ๆต‹ MMAction2 ๆ”ฏๆŒๅŸบไบŽ `AVADataset` ็š„ๆ—ถ็ฉบๅŠจไฝœๆฃ€ๆต‹ไปปๅŠกใ€‚ๆณจ้‡ŠๅŒ…ๅซ็œŸๅฎž่พน็•Œๆก†ๅ’Œๆ่ฎฎ่พน็•Œๆก†ใ€‚ From 07bf7fbd12208c01057351b0b35041a02c1205aa Mon Sep 17 00:00:00 2001 From: Zhao Cake Date: Thu, 24 Aug 2023 20:25:11 +0800 Subject: [PATCH 10/24] [Feature] Add new configuration files for Swin (#2649) --- ...pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py | 1 + ...re_16xb8_amp_32x2x1_30e_kinetics700_rgb.py | 56 +++++++ ...pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py | 156 ++++++++++++++++++ ...re_32xb4_amp_32x2x1_30e_kinetics710_rgb.py | 108 ++++++++++++ ...pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py | 155 +++++++++++++++++ ...pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py | 153 +++++++++++++++++ 6 files changed, 629 insertions(+) create mode 100644 mmaction/configs/recognition/swin/swin_large_p244_w877_in22k_pre_16xb8_amp_32x2x1_30e_kinetics700_rgb.py create mode 100644 mmaction/configs/recognition/swin/swin_large_p244_w877_in22k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py create mode 100644 mmaction/configs/recognition/swin/swin_small_p244_w877_in1k_pre_32xb4_amp_32x2x1_30e_kinetics710_rgb.py create mode 100644 mmaction/configs/recognition/swin/swin_small_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py create mode 100644 mmaction/configs/recognition/swin/swin_tiny_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py diff --git a/mmaction/configs/recognition/swin/swin_base_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py b/mmaction/configs/recognition/swin/swin_base_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py index 187ebf4a62..e3d3377630 100644 --- a/mmaction/configs/recognition/swin/swin_base_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py +++ b/mmaction/configs/recognition/swin/swin_base_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py @@ -28,6 +28,7 @@ cls_head=dict(in_channels=1024))) # dataset settings +dataset_type = VideoDataset data_root = 'data/kinetics400/videos_train' data_root_val = 'data/kinetics400/videos_val' ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' diff --git a/mmaction/configs/recognition/swin/swin_large_p244_w877_in22k_pre_16xb8_amp_32x2x1_30e_kinetics700_rgb.py b/mmaction/configs/recognition/swin/swin_large_p244_w877_in22k_pre_16xb8_amp_32x2x1_30e_kinetics700_rgb.py new file mode 100644 index 0000000000..a0a7b84303 --- /dev/null +++ b/mmaction/configs/recognition/swin/swin_large_p244_w877_in22k_pre_16xb8_amp_32x2x1_30e_kinetics700_rgb.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine.config import read_base + +with read_base(): + from .swin_large_p244_w877_in22k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb import * # noqa: E501 + +model.update(dict(cls_head=dict(num_classes=700))) + +# dataset +data_root = 'data/kinetics700/videos_train' +data_root_val = 'data/kinetics700/videos_val' +ann_file_train = 'data/kinetics700/kinetics700_train_list_videos.txt' +ann_file_val = 'data/kinetics700/kinetics700_val_list_videos.txt' +ann_file_test = 'data/kinetics700/kinetics700_val_list_videos.txt' + +dataset_type = VideoDataset +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +optim_wrapper.update(dict(optimizer=dict(lr=2e-3))) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (16 GPUs) x (8 samples per GPU). +auto_scale_lr.update(dict(enable=False, base_batch_size=128)) diff --git a/mmaction/configs/recognition/swin/swin_large_p244_w877_in22k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py b/mmaction/configs/recognition/swin/swin_large_p244_w877_in22k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py new file mode 100644 index 0000000000..b4c23ecfea --- /dev/null +++ b/mmaction/configs/recognition/swin/swin_large_p244_w877_in22k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py @@ -0,0 +1,156 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine.config import read_base + +with read_base(): + from ..._base_.models.swin_tiny import * + from ..._base_.default_runtime import * + +from mmengine.dataset import DefaultSampler +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from mmengine.runner import EpochBasedTrainLoop, TestLoop, ValLoop +from torch.optim import AdamW + +from mmaction.datasets import (CenterCrop, DecordDecode, DecordInit, Flip, + FormatShape, PackActionInputs, + RandomResizedCrop, Resize, SampleFrames, + ThreeCrop, VideoDataset) +from mmaction.engine import SwinOptimWrapperConstructor +from mmaction.evaluation import AccMetric + +model.update( + dict( + backbone=dict( + arch='large', + drop_path_rate=0.4, + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin_large_patch4_window7_224_22k.pth' # noqa: E501 + ), + cls_head=dict(in_channels=1536))) + +# dataset settings +dataset_type = VideoDataset +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type=DecordInit, **file_client_args), + dict(type=SampleFrames, clip_len=32, frame_interval=2, num_clips=1), + dict(type=DecordDecode), + dict(type=Resize, scale=(-1, 256)), + dict(type=RandomResizedCrop), + dict(type=Resize, scale=(224, 224), keep_ratio=False), + dict(type=Flip, flip_ratio=0.5), + dict(type=FormatShape, input_format='NCTHW'), + dict(type=PackActionInputs) +] +val_pipeline = [ + dict(type=DecordInit, **file_client_args), + dict( + type=SampleFrames, + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type=DecordDecode), + dict(type=Resize, scale=(-1, 256)), + dict(type=CenterCrop, crop_size=224), + dict(type=FormatShape, input_format='NCTHW'), + dict(type=PackActionInputs) +] +test_pipeline = [ + dict(type=DecordInit, **file_client_args), + dict( + type=SampleFrames, + clip_len=32, + frame_interval=2, + num_clips=4, + test_mode=True), + dict(type=DecordDecode), + dict(type=Resize, scale=(-1, 224)), + dict(type=ThreeCrop, crop_size=224), + dict(type=FormatShape, input_format='NCTHW'), + dict(type=PackActionInputs) +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type=AccMetric) +test_evaluator = val_evaluator + +train_cfg = dict( + type=EpochBasedTrainLoop, max_epochs=30, val_begin=1, val_interval=3) +val_cfg = dict(type=ValLoop) +test_cfg = dict(type=TestLoop) + +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict(type=AdamW, 
lr=1e-3, betas=(0.9, 0.999), weight_decay=0.05), + constructor=SwinOptimWrapperConstructor, + paramwise_cfg=dict( + absolute_pos_embed=dict(decay_mult=0.), + relative_position_bias_table=dict(decay_mult=0.), + norm=dict(decay_mult=0.), + backbone=dict(lr_mult=0.1))) + +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.1, + by_epoch=True, + begin=0, + end=2.5, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=30, + eta_min=0, + by_epoch=True, + begin=0, + end=30) +] + +default_hooks.update( + dict( + checkpoint=dict(interval=3, max_keep_ckpts=5), + logger=dict(interval=100))) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/mmaction/configs/recognition/swin/swin_small_p244_w877_in1k_pre_32xb4_amp_32x2x1_30e_kinetics710_rgb.py b/mmaction/configs/recognition/swin/swin_small_p244_w877_in1k_pre_32xb4_amp_32x2x1_30e_kinetics710_rgb.py new file mode 100644 index 0000000000..a16bca3af6 --- /dev/null +++ b/mmaction/configs/recognition/swin/swin_small_p244_w877_in1k_pre_32xb4_amp_32x2x1_30e_kinetics710_rgb.py @@ -0,0 +1,108 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +with read_base(): + from .swin_small_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb import * # noqa: E501 + +from mmengine.dataset import DefaultSampler +from torch.utils.data import ConcatDataset + +model.update(dict(cls_head=dict(num_classes=710))) + +k400_data_root = 'data/kinetics400/videos_train' +k600_data_root = 'data/kinetics600/videos' +k700_data_root = 'data/kinetics700/videos' +k400_data_root_val = 'data/kinetics400/videos_val' +k600_data_root_val = k600_data_root +k700_data_root_val = k700_data_root + +k400_ann_file_train = 'data/kinetics710/k400_train_list_videos.txt' +k600_ann_file_train = 'data/kinetics710/k600_train_list_videos.txt' +k700_ann_file_train = 'data/kinetics710/k700_train_list_videos.txt' + +k400_ann_file_val = 'data/kinetics710/k400_val_list_videos.txt' +k600_ann_file_val = 'data/kinetics710/k600_val_list_videos.txt' +k700_ann_file_val = 'data/kinetics710/k700_val_list_videos.txt' + +k400_trainset = dict( + type=VideoDataset, + ann_file=k400_ann_file_train, + data_prefix=dict(video=k400_data_root), + pipeline=train_pipeline) +k600_trainset = dict( + type=VideoDataset, + ann_file=k600_ann_file_train, + data_prefix=dict(video=k600_data_root), + pipeline=train_pipeline) +k700_trainset = dict( + type=VideoDataset, + ann_file=k700_ann_file_train, + data_prefix=dict(video=k700_data_root), + pipeline=train_pipeline) + +k400_valset = dict( + type=VideoDataset, + ann_file=k400_ann_file_val, + data_prefix=dict(video=k400_data_root_val), + pipeline=val_pipeline, + test_mode=True) +k600_valset = dict( + type=VideoDataset, + ann_file=k600_ann_file_val, + data_prefix=dict(video=k600_data_root_val), + pipeline=val_pipeline, + test_mode=True) +k700_valset = dict( + type=VideoDataset, + ann_file=k700_ann_file_val, + data_prefix=dict(video=k700_data_root_val), + pipeline=val_pipeline, + test_mode=True) + +k400_testset = k400_valset.copy() +k600_testset = k600_valset.copy() +k700_testset = k700_valset.copy() +k400_testset['pipeline'] = test_pipeline +k600_testset['pipeline'] = test_pipeline +k700_testset['pipeline'] = test_pipeline + +k710_trainset = dict( + type=ConcatDataset, + datasets=[k400_trainset, k600_trainset, k700_trainset], + 
_delete_=True) +k710_valset = dict( + type=ConcatDataset, + datasets=[k400_valset, k600_valset, k700_valset], + _delete_=True) +k710_testset = dict( + type=ConcatDataset, + datasets=[k400_testset, k600_testset, k700_testset], + _delete_=True, +) + +train_dataloader = dict( + batch_size=4, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + dataset=k710_trainset) +val_dataloader = dict( + batch_size=4, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=k710_valset) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=k710_testset) + +optim_wrapper.update(dict(optimizer=dict(lr=2e-3))) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (16 GPUs) x (8 samples per GPU). +auto_scale_lr.update(dict(enable=False, base_batch_size=128)) diff --git a/mmaction/configs/recognition/swin/swin_small_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py b/mmaction/configs/recognition/swin/swin_small_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py new file mode 100644 index 0000000000..1536ee72f3 --- /dev/null +++ b/mmaction/configs/recognition/swin/swin_small_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +with read_base(): + from ..._base_.models.swin_tiny import * + from ..._base_.default_runtime import * + +from mmengine.dataset import DefaultSampler +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from mmengine.runner import EpochBasedTrainLoop, TestLoop, ValLoop +from torch.optim import AdamW + +from mmaction.datasets import (CenterCrop, DecordDecode, DecordInit, Flip, + FormatShape, PackActionInputs, + RandomResizedCrop, Resize, SampleFrames, + ThreeCrop, VideoDataset) +from mmaction.engine import SwinOptimWrapperConstructor +from mmaction.evaluation import AccMetric + +model.update( + dict( + backbone=dict( + arch='small', + drop_path_rate=0.2, + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin_small_patch4_window7_224.pth' # noqa: E501 + ))) + +# dataset settings +dataset_type = VideoDataset +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type=DecordInit, **file_client_args), + dict(type=SampleFrames, clip_len=32, frame_interval=2, num_clips=1), + dict(type=DecordDecode), + dict(type=Resize, scale=(-1, 256)), + dict(type=RandomResizedCrop), + dict(type=Resize, scale=(224, 224), keep_ratio=False), + dict(type=Flip, flip_ratio=0.5), + dict(type=FormatShape, input_format='NCTHW'), + dict(type=PackActionInputs) +] +val_pipeline = [ + dict(type=DecordInit, **file_client_args), + dict( + type=SampleFrames, + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type=DecordDecode), + dict(type=Resize, scale=(-1, 256)), + dict(type=CenterCrop, crop_size=224), + dict(type=FormatShape, input_format='NCTHW'), + dict(type=PackActionInputs) +] +test_pipeline = [ + dict(type=DecordInit, 
**file_client_args), + dict( + type=SampleFrames, + clip_len=32, + frame_interval=2, + num_clips=4, + test_mode=True), + dict(type=DecordDecode), + dict(type=Resize, scale=(-1, 224)), + dict(type=ThreeCrop, crop_size=224), + dict(type=FormatShape, input_format='NCTHW'), + dict(type=PackActionInputs) +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type=AccMetric) +test_evaluator = val_evaluator + +train_cfg = dict( + type=EpochBasedTrainLoop, max_epochs=30, val_begin=1, val_interval=3) +val_cfg = dict(type=ValLoop) +test_cfg = dict(type=TestLoop) + +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict(type=AdamW, lr=1e-3, betas=(0.9, 0.999), weight_decay=0.02), + constructor=SwinOptimWrapperConstructor, + paramwise_cfg=dict( + absolute_pos_embed=dict(decay_mult=0.), + relative_position_bias_table=dict(decay_mult=0.), + norm=dict(decay_mult=0.), + backbone=dict(lr_mult=0.1))) + +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.1, + by_epoch=True, + begin=0, + end=2.5, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=30, + eta_min=0, + by_epoch=True, + begin=0, + end=30) +] + +default_hooks.update( + dict( + checkpoint=dict(interval=3, max_keep_ckpts=5), + logger=dict(interval=100))) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/mmaction/configs/recognition/swin/swin_tiny_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py b/mmaction/configs/recognition/swin/swin_tiny_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py new file mode 100644 index 0000000000..4d7fa07d55 --- /dev/null +++ b/mmaction/configs/recognition/swin/swin_tiny_p244_w877_in1k_pre_8xb8_amp_32x2x1_30e_kinetics400_rgb.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine.config import read_base + +with read_base(): + from ..._base_.models.swin_tiny import * + from ..._base_.default_runtime import * + +from mmengine.dataset import DefaultSampler +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from mmengine.runner import EpochBasedTrainLoop, TestLoop, ValLoop +from torch.optim import AdamW + +from mmaction.datasets import (CenterCrop, DecordDecode, DecordInit, Flip, + FormatShape, PackActionInputs, + RandomResizedCrop, Resize, SampleFrames, + ThreeCrop, VideoDataset) +from mmaction.engine import SwinOptimWrapperConstructor +from mmaction.evaluation import AccMetric + +model.update( + dict( + backbone=dict( + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin_tiny_patch4_window7_224.pth' # noqa: E501 + ))) + +# dataset settings +dataset_type = VideoDataset +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type=DecordInit, **file_client_args), + dict(type=SampleFrames, clip_len=32, frame_interval=2, num_clips=1), + dict(type=DecordDecode), + dict(type=Resize, scale=(-1, 256)), + dict(type=RandomResizedCrop), + dict(type=Resize, scale=(224, 224), keep_ratio=False), + dict(type=Flip, flip_ratio=0.5), + dict(type=FormatShape, input_format='NCTHW'), + dict(type=PackActionInputs) +] +val_pipeline = [ + dict(type=DecordInit, **file_client_args), + dict( + type=SampleFrames, + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type=DecordDecode), + dict(type=Resize, scale=(-1, 256)), + dict(type=CenterCrop, crop_size=224), + dict(type=FormatShape, input_format='NCTHW'), + dict(type=PackActionInputs) +] +test_pipeline = [ + dict(type=DecordInit, **file_client_args), + dict( + type=SampleFrames, + clip_len=32, + frame_interval=2, + num_clips=4, + test_mode=True), + dict(type=DecordDecode), + dict(type=Resize, scale=(-1, 224)), + dict(type=ThreeCrop, crop_size=224), + dict(type=FormatShape, input_format='NCTHW'), + dict(type=PackActionInputs) +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type=AccMetric) +test_evaluator = val_evaluator + +train_cfg = dict( + type=EpochBasedTrainLoop, max_epochs=30, val_begin=1, val_interval=3) +val_cfg = dict(type=ValLoop) +test_cfg = dict(type=TestLoop) + +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict(type=AdamW, lr=1e-3, betas=(0.9, 0.999), weight_decay=0.02), + 
constructor=SwinOptimWrapperConstructor,
+    paramwise_cfg=dict(
+        absolute_pos_embed=dict(decay_mult=0.),
+        relative_position_bias_table=dict(decay_mult=0.),
+        norm=dict(decay_mult=0.),
+        backbone=dict(lr_mult=0.1)))
+
+param_scheduler = [
+    dict(
+        type=LinearLR,
+        start_factor=0.1,
+        by_epoch=True,
+        begin=0,
+        end=2.5,
+        convert_to_iter_based=True),
+    dict(
+        type=CosineAnnealingLR,
+        T_max=30,
+        eta_min=0,
+        by_epoch=True,
+        begin=0,
+        end=30)
+]
+
+default_hooks.update(
+    dict(
+        checkpoint=dict(interval=3, max_keep_ckpts=5),
+        logger=dict(interval=100)))
+
+# Default setting for scaling LR automatically
+# - `enable` means enable scaling LR automatically
+# or not by default.
+# - `base_batch_size` = (8 GPUs) x (8 samples per GPU).
+auto_scale_lr = dict(enable=False, base_batch_size=64)

From b220e11704b15be30afca480f4bf7605134bf985 Mon Sep 17 00:00:00 2001
From: "zhengjie.xu"
Date: Wed, 30 Aug 2023 02:16:57 -0500
Subject: [PATCH 11/24] [Docs] Update QRcode (#2661)

---
 README_zh-CN.md               |   4 ++--
 resources/miaomiao_qrcode.jpg | Bin 0 -> 225737 bytes
 2 files changed, 2 insertions(+), 2 deletions(-)
 create mode 100644 resources/miaomiao_qrcode.jpg

diff --git a/README_zh-CN.md b/README_zh-CN.md
index 52b5096c2a..5e2b2ab241 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -380,10 +380,10 @@ MMAction2 is an open-source project jointly contributed by researchers and engineers from different universities and companies

 ## โค๏ธ Welcome to the OpenMMLab Community [๐Ÿ”](#-table-of-contents)

-Scan the QR code below to follow the OpenMMLab team's [official Zhihu account](https://www.zhihu.com/people/openmmlab), join the OpenMMLab team's [official QQ group](https://jq.qq.com/?_wv=1027&k=aCvMxdr3), or contact the official OpenMMLab WeChat assistant
+Scan the QR code below to follow the OpenMMLab team's [official Zhihu account](https://www.zhihu.com/people/openmmlab), and scan the WeChat QR code below to add the Miaomiao assistant as a friend and join the MMAction2 WeChat community. [Friend-request format: research direction + region + school/company + name]
- +
ๆˆ‘ไปฌไผšๅœจ OpenMMLab ็คพๅŒบไธบๅคงๅฎถ diff --git a/resources/miaomiao_qrcode.jpg b/resources/miaomiao_qrcode.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d34cbae6fd131d668b0f16bfe918993610257131 GIT binary patch literal 225737 zcmeFY1yo&6voE-T0Ko$Uw*VnH!QDN$OYmU9-8HzoYXSs!cL^3exVr{BxE*8;$^X0W ze($|`Yi8EWnptaZ!`geF?%MV1s;;iK+D~&&Yrrch@ekqv6chkJLH>ZJMIZq3>;L8S z^aDVD`C{Y+1`HG#0F4d>gAVo71CT%}2o3cD22#y`B}60yC}{YXFtBhho)&T zcIUr}|37s{0f4E0uh0dbFeoUMrZ!d%Ker;S}7J zE?@scz85*}QohJjsHd$Y_8W;NEW10J2cf_``M=mBzxL)0^%Ri18$W8()3^I_*z$dT z%H$HVDva{Zj*pK5INGUnbW{Jr>k^2r=iCObTzNeKi*9w^ zd|4!82OD+ltF%T6cez1yapzm9ImSzSJCfN2HDiKEXJg=!o3_wVtu~M_9bQisnd8u~ zQvp+i)U*-(>Fw6=Ymov>fVx^F`ptvo9!S#zhI9bU<{#N$UOWK}b$nRh_umfnsJvHQ zjO%N_30oq|Qf!6VIZN;Dxojk$HaDW1+GuUYH@y@mKjX&(C33VzRi1{d@T*V5({SGo+7JBp=cfB3fdyv>Xd=$--{7+al&6(1-(Gj2qjTWwk-;EY0 z5E+p#7m+b5ru5aZ;+Y;ocD=5%Z90uO3R4%l>)+Jit#6|G@1oUkipRdoKu4v9G2Z9U z(I4v&X5wL54GX4=K8vpCeQ1BdpB>7K5Qnn%50-xN<+U_%OA}@SSHt;v1M;PSAKw#j zcq79jx9bW3mb111T%_PX#6TDOW4CSpFR(Y;B?7@M+n&E(zEQ&wPiy{hlD;N26!ctNeSxB;%fJA()5^z{ot`V z#%Vxg20Z)+9?-cf*YbXX&JK*cev!Q3BCvSwvw7|1Jc)H~eU^AIl3UHzG?tB)@;iAI zuGfNpX)(0Cc-T2(2-?VBS3MiV-DFe`M{7|Ff}4Cfi!nYOf1zAWeu}Ys=KC*!80mjK zI__oBaaYkjlH9h-{Yb7H&XQ;|)w7f36rh#bw^Uh$u`DvgS3P~PpEs|6l%XAPN#3aV zH=ChkGTQ48+B0D!J2E@t=vLbI#W@k_3V>wALpY}w(gqK5@fHB}gGqIR#G;{pHtO?r2gRM2#JsB)$<7F>_a#2tqymi4e{z&Crr-BF z0@!|8O~!%|TK^l5|KC2r(K15DNUwKMQUCxW+foMrW)03?4L}$G)DVUR_>W$o!UjKk z1AuE?oN67nA7DFBpU&jlEhd90m976{TCPSe8}2KB%TcUkV~2jI+U=ggRxq~f3q{`| z@#pmyLbUL!S0v)Vra<@TbtjZAAw?N@M1e70;bO6r8kfH?%2o}<7hoh;3fMR3onOpw ziSa0Bx?U9qHloK?!R`Mrv48_h_wZ;{Lc=DWIJBJr;_jRiv!R}0C&$_TCM{oToP1WL z`DR7+8{T@7Jw#j*A^Ic7pO_A?^gBYMV0`Q%q1`G09A=KDNGu``#a8d<8{rZMyJp{T zc>Ll2(r*T3FnRPC5V@*scWws|$JS$cQwU@RY56_o8d#@bQJ4JP8E$r_eApxd?2mS` zB$rgOIjF48r?=BL6_xH+4T9%yVMWz8==Ox}8jr7aF#sX+<|W_d!$0iFkTf``R|QI0 za?u*S*b?~Gf{zS5k33<<^;^i7f9c1uV1DVIQOGsjN>c+RcR7Cm=T=DDb!Q5=ciHTZ z^0zwq(&hCjA8u7dS>|??%X_Eq*O*q|tO?Lm*=_#@HtmI<09p8jjh1CsDHQYrw1yg+ z8pe<14gV&b%h-WZt)-APawO>!prOa32SiT`D-+hpdwu@Z)nF}>#ZBoIxSyWerN}#W z*r+CfYTa>wgrpOvMbh%qmHU$2-ZRlFlsyEHbH zQs$w~l=NTRO~mD&#acP|p>m)6y8pXIgO=ISig%z0&bgU_p)e#`AtKdFyj+Y`aZM5S7QGgLR4?} z>B?$5-}I~D=3x}cvner_A%^LMZa>#od}&UJWL#JzBWk4DEpF{MD=NwCDK-xGVL3#$ zeJ%?$XCZYxTr>W4_Buzclu}-{?~u|Qh+vBk>|tCg|Hv9bufJ5_`PfF(E9mw13txnQ z1=6m?8X+5uCdtO^#E67Xr#>1L5!kVSP&1(GQO@JA^WO4ZB$64^F0a!u;)GeZ-lR;2 z57C+cm0ifzeUyZp-Oaaw4(Z2+($imAtM7z1n&hrlF#6kB?nRE@!Z#zS&TPSf^q--j zUVZFplDmBneD&~G(v5F;5n_{F9?oA&C-9xGBq{AQ-*WDz!k86k%8&;uiO!J04kxEt z^Qur^VM{PRW^SRsJ3!VQl5{7VMcI`?RULAvKZ)<(Kx5ydpZzzrKCr{PC_tfqesHKe zK@AgR7r(JuH7E!lDh%hfDmY!qY>Cy6+-m=XN6{qcdSB|^nJ;tm4jc~0>uoZ)*YuuN zaU^+5Y-`%rT7U4h^C97}BSA^p9TGAR%^Zrtj~{*iVKw3<1U&%i>`F`zthh2mrHz-j zZB_j>ON*J$0|&1~;9Y!vw7&TM1!;i|yN*ir!}KunJC$B`ylV?xI?kiRA+-gDKoh^R)%fV7Q3yog@o?!FeVb*R$S%VTlYptTuE9FulXE{KZnzA=k?*iH!=gLm2!amIwODY%P>n{yp4lhew4H zM?@RM|FOCexkDamBZfWWnDJ3wqBGg?Ku#q*pj;b{-(rM~Yj3~tV2UVh61w|vyW<#4 zpt*9V!}x9Y_#Gz;Z2)10@AF42s=z}qt({EMFFtUt!K4;|HqNm|9IRzG z=jlHu3_S8K2MoOMDy2BG{e#6iodaFzS8v1FJjv3B(Hro+CbfG-{RoNH%0Za+cVJl~ zSLPB5>-Xs8WjOYUvY$9?l`hIg^PilCCJ=*l+MBE=;=SEsUhtOP4w1!`5AN0zUBhoH zetb{gj?%puW?Jt!U3({meU5g~{z`0JmtT|`&W0u4!nH;WZ4#+|44K|P3&J&tr7%ze;x^9h? zc;=BWMb6ggN`FfRTd)+pLV_td$b5Y<-P6O~C3QpT#1r!uv6EX57O1 zvjjcM?}v`NG*l5}^>jMN)sYiJ9*0f9ix(dybLUZA;j(Nz!+rkyIBDU+JDOp9INQa! 
z0pFZgSEE`&T)PTC-ZJUl%mV1?01sz-J$~H zA9je@Z%j``X2}PXNI0NPdthWO;6q*?yvgMlBJMHSx6E*S2ljB)z3H8L;GC1Z_TXV+ z%VfpeRfY4uSU%eN0}C(3l$lyezvW-bU(?vwJ%WmTraaxDx3pwz5{xuY7+DeZp>cbN zki_WQPl4zSavL6Ry9pfK74gQcF^(Avr1yn`GFAmRA;TFLoAWzVjCIFg^r)r&^;~MA zDMt>%RM`N}EL;DfIPCs<1%vlgB#J*DtfKRlPU#aQi_U91?|uJNqAdS4`_#aDDF)Y# zn-M3($#pyHfY2#LcQ?&$1~*2w)XbDLH5(KHkGGiLUkNC>*FUr*@O$;77;N)sypwe% z$>?`$&rhJaf<9xpKx`)?<(VUu!+-77Hs&c+VEAHwaIR*j#GS(Btx3g4c%Ds{;DkS* z=t&V(XZ#0sI6XUk7UyD+8-sU#nW93pXd{^rrR2l8v&1HtelKC5`ooH3u0C|<%It6L zY-7r*=Vc6x$q6*@Dy&ke<3sdZitmfJR@Ea5De{rcb-~R+I~Mo?T~t)6bEdkEzcy*N zn|bWM)ADaf+YVR8wWXH#N`vw|P+FX8kQ&QZ2xh>>ztpu*EnqFSGc2!LzvhwGyUmA) zePO#EwZ`WgY#boj|GsO5aD1Vy@hs`20u=C5Y>q{*w*Qb*kQifTa_6^<#7`aRa^ijJ z)V$9EZ?i}Z4Rky)IU=G#%`eTQNgCoTS?NeC;;iiWwu*?l<(6f7t!bNsiP}@Gu$Dz~ ze5P;Zblu&95+a-%xKCj#;X0ari3l_{U^619)E=+%t)z#rL2#iOY@zf(1iS9clPy2{ z)@#0>K*#3gUry?bfelTf@In42piV{Sd3oJjuS zVW`jA&3iidJ-n4&cgj7LQGc6%FJj71cSe9{CRf&m`#ts12?S6g%^KgO76yw z7-^UX01Wh_Y6~DPcJIYNSKcnZwer~vW)|N8oYVZs7TKTWQ^HHa|usFXZ;ImriX9pV>ArKg` z1UC96yra(?$vWzOg@q+^+?D8c(oqj$ZpYW_uo~l@dQSp)p!RuY!~3!|itu)JvB{rD z1Ya9j^XZ!xrVfc@*SSrEi22q%0dn@vn}Dj4+>GN#h#2_h0Nlt?OD+x`wPz2uxP66L ziI~TVKZjjm3L#%!U{FQxC*qNXR&U{Nt~ba5@+-1lVV5aA0aL7epYfMSvy2_@bM<+| z^OQDQ^!4ngd(N+{ggP{$fSz*aB=`8jx zd9P4`UClfuQh1Fw%~GiLv3?}CE7SE1=+D4Qz)@@d+nV*P3Lfu`>G-$|WP*UGNq;c5 zy~#N&i-=n-yJIsnudQdYi|EDzYAn>{N2_a5ZX0*r+gjuoE(;fKqG|a2!CxzYWw_gA z(zryIvvWqVneDQzxj#w2(kWrD(Sa_sYCTKubC=KRRXFi2Z9r*m{Ta6AB}6{R5F7zL z1<%OChgJC_&Gm#c#VHeiCLI}JP4LX{%DjX>$nr$ri-q?6H5O8T7kRq%^+tSo)7Jej zZ~`iV9TOB<&@T(yAh$30<|+9{$ovy43=!M1=K+B@HXsy4gqAj!h4*!0a#6i}f`7n> z*nL|=z7t0r+8EbixS1DB)hk;eEe0ogyuLyfo?d=t8~v#fH03?$%6K@l zj(l#FCy`}Dijp33HkAvizP%5o4^TZF_4Z6J2@EdlomcxjOpNO6lo4%)T;v>_oJ!XY>TT4B=dZEq#) zXSvuToCNFT$M1KLV4TMDn~$t%eGgwlwkDd@Mivkr_95Op|0@7pU#POEMZN~Smboig zsf*4Oj%TQO3t(vJN7NamBN1u+YK)OwQ|V%HJ$(VG=iV130n5c1WJCQO z`r)tz(sf|0*8t#mKAS`I*B~Z_waG z``&dV@TlAOd%;9dU;h9*Ggdyzs{JI@TNl5Ra#I!o{H>Oqp~{h}3FSF)E7ZwAwR#Wcuq z$nIHZds4petQ9JaL&8YXQ=Sm=ksVyVZ>?5%gK4|4h>y#<*b&C^@ljRT3V*pnL6d}y z(&9gfjyt$R9j6F?a zm3v$CWmF@}pSaR7t{t|i&xOvp>sZE~dFo$hK&pgM?#jj4QD1Ylnq25X(Gb5mk;VD9qOL_< zH)c}8qn+H3`6BwoP?e4KrmVp0pJ3@Er{_un&kia@y$+4Qp1F^9G{oqB4U@xc>C1;P z+oLDoOdr2Ab!=4d4=-p&^!Th5QR4S>TI3vzxV{7Ms4M=Q<7t&R?Jcx@_t+G^s;lU} z0L+l{idc`rrpNo6z=1OX-pNF&y`jN36G8D|6tTaP0|bqUh#vdpk0Mgu3_5eRC1$Td zjIG8MfYP$0vC56ZKCAnwJ=*?q@?}KFzz{aB+WyaQDar@0<2Sf-g#x*20P%;Bxr)M4 zxHpq!ehI6jJ`2=vJxY$hbTVM0%$qhec{s`BKB4Q3wZdq0Orvw{aa~UwKXm>XC4e84 zqeiCqn%#G&lf%Zw?noG7haCQOH;col2=@Mj*{W;`@IMSn#-oXS;qA}(c9zI*3C(6U zaw(RX@5f+kM%kkgqC&GmH6pa3AeKfzwI9Q5zKhwxnHqFP22u$)pp%3gt30Pg z7#qJz5(Qo_{x1o^dB5|^Q@R*LI0F~?Wz(O#I}|SN-I#p6Rp)#g4l=yd3YEPXi>gGAX|5+RBk zEp4upH3BpE?ZpB?MhYd?J%h>l@UL?)Ma(WNms7A7kRiRVHS|bc<_}}w^_Jam!McY) zBOoKitw-dbm+%TL$g27_Tv_5SFBX-++#X!U>?210*-6VI=@+8zt<>flWFl1o_RrDI z-0MWPs;Wk7lnPKf2~xYKdiOg8#DCbrdUzTd#(!r!mYmSQcl&raYvG;0aQFnkhV#pX z%>I7+TiL9`?YraSV|SzzZSGw-@@4-N^wy-GK+90A4ISlid*aVCcFoOT10RR0S@>Z* zjVQi=%-MM@5dYQ=cQq&^QtdG7ZRLS=) zO*YZfLNzosti;@8-dUIQ^026_vF`kPODf;kGaNmtsx3$f~%A72!H z|9OiX9VwKhpxVEE-87AY1!9mpbULb4m;~_-3eVm7*{)GYGy5m)md_d`el|GZf_t_7 zZz=pbx|cIPN^->hs<~7W>=QTDKc&#VisFm9QrWb+d+QON`s!gcCFY?uJG}K`!<0N$ z;clL3bCD}+I;6sF>SlA49Et1G!Rq+=tRQpmm!;A^%rv9O==*S?E2Sli>kKhsv(|MN z`nkQW(&t?v)+qo=+~wseSBIg_;LJBI>(`NvIW0RBsYqH5dCHqP(X)f2xHY3+GKT1W zPDR6to(qQ-Obwh7xU6}ocITP_DN-o5IeM_jHDuSYn&!NuP? 
zLRG;bIRMT1yaMyXLKlV;<0nw&Dg>6m1p@u0|Az)_A-M|?SD#BLjE#Eg%ip8^E$)Kp zy(uZjgLjnGq;WF^AKrA6X)`~()7aePv0km6VXE56GPU^y1$Nu~f;;yhH9=)B9h=hM zL$>wZkJvDixbZxoHo=x+^cKEzxcpQ2JB@2+1_$3~bAmCZzUOmII^ERTB-InJl}aT%*u8&jN{zmv z+1zdSo#6fnaJ-HO>d53ae2SM1at+$tlpC{;>ngjeyUjtAHT?3h7Pj|I!PJXXMIf=! zH_!M`^75~=w>vP|-VQx>we?Z#%cn&8ZoZmkl!;;|ZkByDTra`2Z}9{e|J177R&E_J zLc_C;o10WRVR=o!+*!G3n-6!+@&)=)$XVfj9o0+sQOJO!aJO~smG zWuFwl4T}PwDpP#4sk7J+b^K#J1$PZ!{BG5FELk9PX!3izZeY4-dDgS<_)9iJl!B+l zrEjlAm7MzkgY8**0FK^EPJ_4s;THP9^y`r`?>TG;VZi0=sdF@ure%4al zwuGI|6|WOf>eMF5TOy{rQL8<1p>+bvQXGef0k)eAMxzqU*7OWWB_OMQ0L~Yfif$9e zd|M-{Im}X!3}coqMzSKsK!E*VdRhqk$@U_13Dcy&Xr?Zxdw*T~6i`T>Z-Nv3xJkHC ze@u>|g1l)!w4&7(nO=wmYFlyleI7Xg7`R4h0IfNRLx9@~HWXz*m4NDGaE1zls(UP( zA@(8zWb0yW{=zi(ls4tNWWD%apFXsUR|DY7LAT`rjoEvo*8IDKgfTQu7ak15xeP+R zd-QU4{?W~A9h z9SLzLt_BWJTSnGOwBM}{ADvsKZs${_lxOz_mq-Qo22C%GrM-GsYr1)E5kT}^#jb%{ z3e==c<}S~gi{Fb^>)8oFzBE5mFk<#QQ#?HjyQEDiv-X`06d}Dmj)@p5 zer7u5%2btMiMPE%p8;a}8@KT?NcDPUJRz2$n=E11!fQ3%n>Btz?Dj#JedBPt z5PavxD(t>RHv~*XMj5rh{A8V}ztNq`)QLDfd^!0ux=eg>C@|W(Jbi}ul6*`KgJ_{y z;@Lc(XM&>4743@cu}XPXLe?)L*RM5vIQ5{mdIDu939{cz6$p&Im2}a9O{P!S^wJI` zL$efD?{xiLy4~UfR4kCi@A_uUQ_?UxsmUyo5iMvrbF9Ucn>C0GHO%bZ`a%!@03Epm zlwgymQbeGj^@ZHkYldVLr_+raPuDWRcIdXo0*@BC1zg`qf7=jI7qon1Rus*BYji{l z6@~a<4CDJKR3$wWS^W5&nxtI3O5T~}4FdJR2grPdb&7q)ZGH&h`agrH;^F+#I(mcq z4+tZag33>_hdpCc}RqUUc`Ivu=qY_%p&d2-{Uh+@pmgN9Mj;l~@+hKmX?v-A;)L9VIgqkR0 zt&V$8#_`l)$!rW=8lLPm;_wT;W_2KpHh2Pf8p&kae?s;*Qcb{ym9L4MS5CGJ%tJDU z-?TKi{8C_B0k?7dhuX$C+JF0!RRl6PKoI8huzYxrGMW9C{HqA=OjRpn^lwSchz{@@ zA4h}hzrJDuTV}7(sG=*zZUy1H-cciR5lM;_3hJd%0{!>2M(JNr9n&9kpxXS>egDoX zU)D`?0`T_l=~e@|?RQpjR@GhE^A{k2T8`h=3G7zP7fO5s>VD$oobAKnF7=b&XlNAa z1RNO*jZLrw8jm!2OoWY&>e!Ten$u+^(0DjGiQk;6nDI*kb<;%Vai-hw{Fgd-t&rhN zRuJI-Sb2yJScPH%#P>U^Rr1TVf4K3wt{qBj%+{7UZzfj>q?>-`wj1`0osw%j zoBiBeci7g?{Mg|)TkZM;T#y(pF+|yCt;}3(ZZ?_j-`$>OBo4O~POtZYcs<|0cT$dT zr)<)(T^}7jzgBG9MY}ScI-%sV52pA1iv?rC0jAvRKxm0`Pn;(a1Gh*_9{=z-Za(E$ zdC-IRu+~0t_MMMh9s1o;HSs8T=WV?9-fx$r>axuX?7ijna(~wU(t3TYZNq#0tU=OE zdePRVa$|Jpwi&W{MsW0u@w_Ag2;4)jNmv@{@>dGI8b!Wz$;37Iy)^0WOb0?iNF5}o%&i$3{V!NK2&C!@H0)|!xl!K5b{Mg7_} zvtuDUM;TiCd^X;kExX{i&+T;_A&8RcH;G&`x9Vu<6SdzAN}T_VaF>t+o#8#WYpvR$Wgn-~*%AJmSm#^KA*-G7{3G$dW9&cq|M%SpIDG#*X8!^Hul(m2YC=eeG!*RL zIO%`#;h|t&z`{Yl{1ZS8{pw%T|I0fStl-~roV+pbbVBxW_J1PtP_sS0##DI+2QW5+ z(*B|0b{EG$9rE(G7X3o=_br%$QuXA94l}x5@6NwDj&n=0HWs|r6d|*}{^p>jtLEo7 zANSptq5q9t1E40>&s`i_TBedAzE=M)6}JonCda51h=oqzZ+I2}d&`N{69U;eBK6#SC|m}zNGdw{eX3y4OZxBnms$`p9ub1P1z z9{u{~_kUgfO9K!MJo|pBSCG$VpdsJNz`THmdjSUx1N8zH1{w~4f`5gD!>)>sf%*Ck z8#b#7DWifK851)LhmbNkg`lF6{#!~3<9EVh28Jd^U)AkmEC2b_1_~MG3AkjA)5ys$ z-i`|?Tpi$UZne4aGEn2VKw##Ep#2P7kw1mT6Kh$;7I zQFMBquEU0c!X-UxMjM9qdm!OP*qszVq%<*BlLM|YkZMrTL*8<|OA-1`#+#GL$!7Mv;#zrZbP_D*3p>e}0ed>Gv9~ca6nsQtdQk6?|OrEnj1*0`WFS|sw;*YPd zu3l@WB&%D63FteY8}e!Ey|;;%oQpUrS&qL_K+~!GjC3`xKH_2qFY<8#YEjiuG1k2H@`ISSk`JkD@gQK&N zQH+h<{k03JH0_X-9pd*Krk|fb$0n|2V$vy%Bsq@A7`dFKDeZ;+%p2LMM631(-Sby)D`_W6ovEzT*>IFk^B&TsdjmOQ+I2={A0+Rw)nhpc38(? 
zyX&j_x<>IxLs3)v>yetw&92Jkbqx#4Uh@^TjhnF2)+(J)Gj&zfsTj-s)>5#sQ}xpL z>nJX67YkAg>n#g5+a!rjwO}Dk#0ffS(O6nk#@wdEzy?L=S9WmSnd@d$4H=7HgLjO5 zrG@8oR8&JE5RF?A4IEb7IupD$gVdAq-ucC8zJJ9@x0~EzD3RESE}CXRHce(GnMpMv zdxG`tS3p06y)X_5)JLM1+8-sAv%R$khv29?)e=8yY=tV=1rmWySR_AoOT&I!VkckZ zPjnNp1wxb?a!Q=f2mI{&Owz8W-ndvx9aR@5S&)7&PPC=o@Ig1-IBO}a?T53Wjjd*2 z$o=wa@N1#vh3RhQ2GSWd!!5m8XAECj_t2Y-ovJk<8(J5f#q?BPFWQ0pT~B>t09dX|;zAR(*+;nMam|kSU#B$J+wziiQtSOTS0mSRSZy5noUet zlij|ys3I$(y;cw+ViNHpN{kvNKROX49Jwy>LM2qOnmujEDCM?q=2$6v$P8KUjk4nC zWV~V|G$LWDBMT;U7Im0493ols}i1^ zM@<_sbys)$!8H2};1vbw#R4`sOc>qaJQO^2&ar48s;UbpDYNTLd;ZT0>u~ zus^>X9X5~7xvzmeuaiV_20~(<2EjgO_^s(mT z!;nCIihXf2j~SRcmpi9|rf3ZP#%NOBntY6|=HwKzgHs@|U2|*5{d+Lu;OM9e@2V|W zKziu%tS{e}t&Tdzj(z(+oBh3XSzXT?+pIV-N17=fUy5-5pqYda*L3?wg>>u|`xWZ1 z=RzZncjMp-+)ezbxFM|NAG6-Z&PJrf>JjGL(uAM;LB}W!U<8KnRHxGFVxt_W93-^> zROA-YUeu|dePXY(_|K;}4y~qg@v9p*Gb-ya_C~Zqag@~PJ@Sv35As(Vd8tl|!zHon za1=;#`8qUljxI%V3`gHo`Ph3bnO%Dkhh@s$P?g45`e*LE8mm1+ z-G-t`A>;V-Ie}aXEXH_C6Sbs-EcB~aPXOfe%!v()oEiL#5=z-K;&fpGlQlF-vX5(J zWkZrmG+f_ES|+~NWO;ZLJ}zwcMcjjjIkk`iP3f|)-L*%J=~EYE1ZpZY+>8j#1>P*W zwucULUEH+hj#`3vHaucg0GS{sH^wQl@cQr4nGL*5zfGx-ATHY*;1N&=+;@+Ru`}e_iZu@LZ&fn{{e%4%^4HOY-)?3J5 zx8SYAsQ-T3L`CSe(Q?&#<25BJ>RihB*6ec@k-_#?J8Qj}7%F8rWt*1?$Jls9yBxla z*H+_B+3u-{Wfo#8-%zxU=MLJ+ zney?Ze(X-EvJpxJ@}07ACbS0gIDhW4gIJbopII&ghC@U=l{n)pcg`%unlcU8e6#w< z>IWkx295UgzE%L!aOEn^U1Ao|&1n2_NeH?G26_-NE>sR~VB1lyb)cOeX)S`1Q_kW( zj2@ASB8rt-ub7ggZGvJhm+7wV^nK-VFXv*bbfBgolz8RYPtF3l^85pla}I$hq9xsnhpWl-4>;SEgC;sA+*gsXV{N#9;f?oOUU#HBFZa+VuAQ!c6kRHbL8d z2FI#*s@F^^waQ681&a~iqwBLL`$N$VhA`qOd~)UIlN`O1t-KD;$5#6Kbkj$BbB+`6 z8lt2R(8T@1%+0x`dPN8w#fZ=)2LVFh?!UL&k1KBG>Y7b%6W>jMJ436yV=LX6{4d1r@#BE2R+#j~HoJTk^ z&rcy?;!-cGVw)<4JxP3(_8>L_Vt8mj&ubnM3L&h#tjZEO1caDRv+TA6XReDVzIh;^Kn?2&|k;Mi!uMW z8@AIuMx8Qid<~yxaDDH>fNH?Js0y93A3-5B8RTweQ3aZnOr%e1qxyuRDjBwu;`zhx zn<%b%eoR>U`)|eFGqZZZvfT7?n%j$ZyP52D`R!Bqt|G;n`SYB)$>NF$yf6A?Un|P8 zD$BA?x@dAOBdcI3GKs!NvCInfzv!DuA^W7;@@AwqX29xusCJ%$tj{6R^_J?VsjJ^i zSx!b02M-v`L(CtB=vtVFRj&-WdF*>EO^FE$2h%J@teoWGqbZG5(P8-+>|9gTV+6jw zQ<)Se5Q{aOlyGl;`IWkcfE(PF;9?GP=}7h=0*#MVM^vd?ZqF}5I{3n#%oDKU0{U6p zTcW1PP;xX>Z`Qul;-eS5n%q0~OcZu@IE3e&MgH!aaXG$4afFIYe5|U@6ELxIj<((l zZ{@br=Y9a17GvEI?vFy=0-Jo*9UW|89xXK$!!u+6nVOpD#4E}_KBO8yoKc$YKLKuf z9b_fgPrw|k<`cmE8SHWC-MW>ZJ$q%y++QHDSb#N>sPd5biKcom<9rNOHrd;)g|S(x zrdS5-J+1f{Df$8IWSSkCrTHn$VaV9ghqp>c+4!y|KSNa9Z8m{LLQZoa*=g?9{Ua+Z zqcWobA0x7ENvpoBg4XBX#ZeTx<`&@`A%%G!Cj07+NGfZynV4Qn&CQ+8aY)>qt))te ztad|0n(D;w36g1b_)I579k2}b#FtG{sD%*eIhGHt@&#Xoy!mcytfPlTTUbOE1;do( zrkv!Ynr))Gzdl_vY=8r!U^6fIjrN_LglP3#gQmKMNt{A=WOhEd4W)Kl6Zcz$nuv79 zrVzWEagxvB5O@pA;BC_ciktG)>?&DC%@{Af%Grl?*DNAfMH$WO?^(qTP__QCqRbyM zVx%eUqgL;$RkQ5Yynj}YR!payT}dQz=5$Ps3Dgj?Yi?heCh-Vl@$e60ihFf{;jyq1 zH?FRWvR4f*_VId`)b*VkownD>McY@Q-v|cNP`_1-@DmF2V}|FZx!AuA6Dl5~s91@% zDa`CSl@w*(kK;^;>gPl_hd$uNl9he;EeUG67WStiqYmF&>Mi(#l?w160*)O#J0#%r4Ig4CCXj*gC}Q(}#?58|??BMLz-wlb&wunr&Y!t5=x zzstHs3(DYbtGR9S#;q%-Av_MsEtm|J*7Vd~P0oja$5pd5y^B~t+#~{00!L1k89PJ# z%V}e>#0Ic>w;<2`5?QdtLm6#743%3}7O&CN-IDe_f-s{i*zxOzuk*xmw!kk-!DP8_ zLooh^D4fTZr@?uO8CsPnt~=i-ezr}Q+ut%z7qwd0$xJaXOwATfYN$l3CGCGis zRQMCY{Z67CQ0m7XC?a>f^P<*inIw|Hx`oj7a? zIc5X3a`e@Bo@twZbpQ4RwufDFGP8Okxp1~-sHVe9mVVwN&;~)xt-o+nlxWrl36@I{ z_F&MaDz|-N(Xt*;T5`}XlRuwgY(A`7ixEjJBSR6O`uzSbM=MjSL-WyOwKv!TI+IaM zlFq4Ssze)8DQ*L5GcYQVEd^}DR8YcQ42h=Edjj4RAb>psb+WA~b9o_4yIvJVI&TEl z-AtC;)0*32o?P;L4Dzv_!Df~UhC%No<4oq=EX{|M!7#J2_h(N0$^^gz=$EmsPUhHj znpyFn<=F$>@1V2-gXCcO`Q)_x?D3Mekg)i+d3rr-$L(xmw@(%BB$KYnYz011S7|K6 zwvl+gmAHhpqwMd=`B~avlP*Hn{@?sz~br=_dB$;hw! 
z&Spo9GE=IK2g|-Ni0wWGx=ZB?OPks!FH*SYj)vN~pQRsu2G!QjXU`{FGiS~eYGVcq zCHlqiQP;s2yk|j=Ft6S4^-?<9n3RP+;22)KX+R51qY&>_WmERbe2JYKwlhXomD@ca ztR_00yOJsDWgSibJB9h7zVHvWR~x0j;HLE?JIcC!l~^Mgr-&RR9v|bUX%7FvQBTP zqQBnFc7b*ITpV`xpfbBCKc6LV8SBMCQ4c%X!6-Y#Z63Bd70xZSDXSeAB*tB4H@+1T zP$gDfs(9p1Llss?U0+~tL8n!}+#6?W?V%EO4@RsL--&j9)fyXAU50z4dO1#^BG-3Y zW$vSGrTRhgn9QLBN+KqXHlaC21Ej^3v(ftffloqo`ph>_i6cF07CG_iE+!(ni{?(v zA>3#UyK6_fGLcc*S`dlpzDmt2|3PQNLix)Hlyx)qf$T?#<1#*OSH4bkRNs88n@J1R zGdLMjOSHsrWqQ*FDDy1^wDhmsG$BHtAchLc6mYb-oy&oX3#y+|3MIQtHBR<^s3U1= znqks_Pet8(nZ$l>D-zvHZCq81kQ;~PFGuJtttl|0OIP?lWtd}Q4Evi!T?G$y&#soj zlx_C8+};xqkVc~bYpEgsY9$9R<#sSz*(x|0dEh_+C%wQ zC*U}Uw9hpjfpR}BsHmlDS6r9)+*tl_dOq@GR$w>sc=rCZElq#m0}D3`8E;7soraLO ziWp>SrEHRDJfG!yJ}fZCYXc{+8xbi$)3Q3|R(S;S*;$eC(euEnGI*zx=v_E!fHN<# z7;Zp#5vwe+cl_b;JniMnT+ZDf>f9Ua!HfzThjW^~?dkA(X(@noH zHrGoWzEbpetL+L(M+aGr`X-a0Xk|h%{2RH+AMz0h`I^xnOhpfh(mEtG@bLu)%8`v0 zUoA6BZ8_V*s^AkqXi?Yx<&ksN!VBi!u6*VW{$^9B>;mnyuSIK*WHZva;g>)^{*ldB zpQ!&O{Rw;Z0?UYuQr{X4G$%WGpT^rC8Xe)<97lClsw$Lt8GWba(|1Vz5HxSUH3&n! zuZ>K7E$Vv=$aeo13!p5pa_I zECjt*{0CO2xfSUd4s1c0T>kECDBp^X*u^X2yC``Ap8#!}NFvXrb9{Qv4P=6Q?A$sw zwn8(D?UR=`&xLl8gr4&ON68tMeS!-|jUz?|Y>ft3YKfD!LiX>5JV^M6BZZst>J_9p z>$Zi?hUm@)cqi29R=vLCs2mUpMx;IgEdt0h?dmX=hGXoSyRR{&lVdsd2K)cdTzXZe z#WW|?Apcn4G*IJ=6P>Y1RRWnTqps*`E-k)eW3QTD%vjL3flG7lbNr7eFQ5@a$bV*D z7;lOtG6wpqu^_RM-mww6MI2!Z`=f}WZ~bqiy=7D!P1~r8J0Z9yxDL*sfdmh3!6oPf zceexx?(Xg`gG+)AIuP6woZucT@SNs(zi;oe*E#FxpI&Ra?~KFHP=Y^x` z?_42#*dRcW9yB|*%h#mxg^Z2Pk;ZW@|!*HF1 zGE|TjATh2>D5-n+&qMQ#5CX-6C`<&g9^sy=AMb;;3Rp}rh*7g(3u4p|aUsI>@6m6X ze!iy6xpMca2cvarqlDn^vm@J<*Q4{e$bLt67U3^e}krr5)2s@>kr6O zhh)yuq*s#gv>2Wiwj`$h)UC{;vUi`F+wA>b*?GXN5tJ`wU@H;az=k?1T`esF(}M@0 zMm7|^w*2wLZpo^=6IyW&Tkxkg24UY#20n|%s%RiPdWjFs1f}Kp`{I0F%DY$<-{-{~FbosBK{+%K2m%nGe?xgCf z?*imCHE~hS&HfzFZ$*)|s=cPt?e6$;yE%9~i#|lc<&ZBi!^OLw<`%+? 
zR3<}~1nqk%TRvivN!S#(m}iC{d6BOcSu?39{nE6=7Pazcgb!yW`juMOCz`y#jAYZN zc^xUhWzeS>!!d{hEfoo-Up(2CcaX!Wjf-aUDl&5)mw3Y{g^xT&b!KN-Iv*v)^d@D_ zLb6-9nTQg9qpoWFh@VzBid(i<0@*2ZbbosTwKjDr(kdBq-E52Uy^E8>O%{vpa=*2> zgX}8b44;k;ov4G5g^Qo5<{&$1t!b)tjEZ!06@l9~G~k34p@sq|3r^8o z*EPaOi?4E~N|~pD8`IzTlj%CfxO|`p76$3p7S$hwUj_PXZ7^vxnA9x(`oAvey#v$t zS=!*0Q2zcLwpR+7JZ8`l4D1d*ui5}7U7@U-t`lH{Qn1myrucPS{zrcm?r=Oyt!R73-GoxX<&P$=f%t*7Wuuuew>bQ?kHysXBH#C$=pIX>^@Sg?__h4lS2R-I+SW z`+r(*w7ry1v04sV8Nf@0#!m|lEv~%BnFhn*Py1n$lu^FFrndC&ig80eqr8K8O}+MX zzxj9Iwa5OtI{e&Hv}EfF?rha)-?*t$5oSb#|6kO@_kYx5b-283DmSzJfRYo-=(TY$ zg=blbzBEN|Lhu9xE;DmcyA&j59slkVYHePvRFn}l$?`kGDKTk#&mt8JD^s?`nsI&~ zkyS_m(NyFlbK}00%xLuNwW`|Uddb}NpC|rP)v+c^{#_9VY?g%iiO;To2R3=~#K4|M7a+ zKaDs2NpkH;>gD>}eW=_}6ql7gmDvA+ra{1#a<`X9x1R44j?w*Pb@R}`iI?*Cu7#W0 zC+~UvExA`MJi%I_)WTARf-*gtr5Ya=ZyOwp@CLjZ74&nsZmDIgY}U~tkUq-fimRaD}NyJuao05`lu;?4KreW0T>R^}pUBo%^0EsJM~>D0-#D=7 za&u&+=sf~mE7O^WZBXWkW) zxy)<>Rp}81ffdS9^~jRav8xbru2ZY+>wG~Hg3UR7`9N{1A$D^Mjvfn>uXXIKHAMDf z?Sz(C7Y)@BOYe_JoYDDv9d=(!LO?RRY)vx=e!@M*;O{H=N6qb-YbzCoXAcXde-nct zZGMT=sb%~tA43m6vIo|`&c$=6zJ%f#C%Y(gXD*Dfx3J%UR~+P=Rx0qE-)O<*D2=Z| zgk~Nk6Hl^#C}Z&`dsdfX{OBdAB5BY(WF6$gfnW^|8>P_?+Yi86GuigaV&X&;s&y#> z8P`|?$%xD{ez)fi+L!PQG-!z%I(SbLo2ygQWFrryi`wiqq&b8&zl$>)p7PWRf&z|5Q~p2eUq(zC;yxcR082V22H7ZIOr3~iw!uU}nvrgS!7Eh>@-n-IDN{N8K20sGrj5xv9K`Cm|6{T{$fN66D$E|3c_Vb=k*;+T z<#CWl-@a6s@p+RZAV=%?mbL{>c=IRA9h#wilypla4kws=IIk&Hiid5n!e7 z&fa4rR!^%^rAwsbKqZ*TAvQM@mwz#)nQ33w?_QVEvxql|-hAAcx)m9o6(S+DA(k{# zHiUT;nru(C15ErW{MSL@i<`Rg$X2JH@HURwq;#OZGdGjWrfz`w>*TShu=h z|4HG#hl{CQBJ&hoTD;Itw?U7n$6niAT_*)jlIZBtx_Tu$u^J{X*yEdO!}uw+Z-8oJ z_hp+ojRVn=n1CN`^R$6FT=L7sYunBg#&#;+c5&{;&6J1*9wY3vx|6x}SDhgwa!Pp^ z_HL=Ou4?(9&=r3<^Em`9V=b9iZ>VwaE6zdMPwQug!+2qDI9}W*@|eq5PP4k}T@Nw@ zgb`KOm}Ur%;x4WK_MB6WdL_!4fHg5$%Szj3GEIHvaFZFnq<^+Aa7XF{m#*=kcv&rz zPH&PQ=%s3LwNxLusBAw7VnZ4B%j++uTz@ZFVSXdU0S}^D031X2R;_v`R0T2CD!)W$t@QUOg8on8sBc>h zuK5(F6kCyX*izpr?U3KQg>tZW;nBu8{P)1?Zu+sB`|1WMhz$}iaAnXKc{ zPFris8>$`E4>5MT;7)#MLgM{@4MPe6IR4H2Q;yztswltQ&sCGZmzS_j06+tZ#OYMDFb^b zEzMlF*)Y9<5I;3>ggd1zOXy7QR!r)oif^odftc`#(30KNUfciu?WltXx-%xB%~%N{ z(~s@7E3Wi~Te3bjyXAgu%&zW4x--Y(Oj9&7i{yxCol`A?{QM<$X?Oe`yt5vr@l%<9 zzYaN8sa;)VwS9>*v_sD5Rm>|d7;KDth%`>2#JII1voH1-m&mdLU}xTkXSW+=}cw!fTf`<6_F$clI|TJZ|hIy z$WFJTJK|unaxXH=jF35%_zwd6zHQ<0wuxaBdCfTGXSvLvb>vK*kLev@WtK)Ep(D;k zvBvuBuGc8KYy+VNbfV&XB}1s34uw$^zNV^H-kU1C1i03P6AW)ENcarW1y17l5r@&6 zExwN)kmRIzP|$-*dlCJJ+wX%w1|vnSNeH>*#dO)WjxB{tM2&GE*Iq_vr85057wQ_v z&JX=D9+RrRvEUUku@j-We_LMb-wx4!?hx?59rAy?y#n+Tm%(^d*MVi_n?Yof|ExPK zP*rO=&)qJqnH&zA$irBnngG<$M`k{kn zJ@9M^&z0QTNeaJV*nDDAT-p%3<)EuL&kVVxl_goriB6%I4kIpZokIbXN2d5k4<7`b%u5SB$7lT3Pfu?V3nc+>amM96tDBa&Bl!7KTcu%8z(TMH>xvJorQCR8Ywfs@O0!Ilwtzaj-*t>Y=!3 z4Y{sP5*F1WXtUJPsZ$_uPcXi$bMuY7dc^QAnwO14_4+cj&Z!ogoip}JJ&pPRv| z2}X8P#unzS2Eu`m$$MI;jBrTWD*ugukm4zu$H5Q-FR&lIUBG!9_)B-w^i=fZHBNK0 z-8Nrqg>3q{_a)y_VZh0jL-R8M4J9PlGw;41gpQ~FgYbY`@@QSk`wq`u$&zzcRH7pO z3!#q4Rsb@sU;@pWL5f?}|zvjXhUdxLs3aXZ$UkSBjUI(UjV(khyKyWfQsl^IrNjFg8@Sbp%Y{BfsPT)IF2GSqDFnbW08#}1+kNsSm<98)=FDj9XQd63sDOHB->d&B5IooiUZ(YBE=a)lYo zrrQmv)}gpjd}!W5MqOoX6fta*v14r6Ol8>E79iV)7tXfFaeVx%0DsezltqY;>M4J< z`oS9QhR=6I8K=gpDbFva&9=0quExTX16`_xiHvOU5D?3==#9y4H^VisFiVh4R7kjZyEcY7Y?fV zO1EX^?wJjI;H^MS`KX|j)ej9ma9u^GvbM15ADgMkC9mP- z>8)uBl#VQ(V^}DY92yPK2f=x-P^<7r;q}Cbg1@pGH+x>l0M1vwiM)hFUP|GJ-z?}F zoY0sX_@B!Te-9iunlgr1h6P-#JZnPB?o1~;oBQHC^jfuqsbO%UT_#@uok;AaCu9`& z=O-+Upo-r3uh*zAVP-#XuF}V@{U>T&tbO_AR)3n*Wipct@#J5tKI}W&UV=MgKJ7NN zfS<6-mI|IW9|)csV8*ww8M_AqxwDEf?Y5=6+KtGeGx9BncoWQj-ME2?XY1#P)hS+RcT3dt2SS<6j_(716j-PLIh@esBhJSbgWVhrfoj|G=T( 
zMdxWoR#E80CCey=^q1#+(LhT#7C9Zhxq_?QNWLp>5`%8r(5a$d(7?j)v^;Xt%0^PHBrNe~n6+sPUEuV^3+@Qik@LEX1rz zInYEoecQ40Q7f0jrVaXuLC2GQMnC5b9JjPed&>*lS0qB7VXQxJUJhSiCmVs` z`L!NIJDc+IbQfp;A-LT+a&{3DE*O~jv0By4DQIS>z*dluvQzCy@nXlNH#*zoW-Oq5 zjD(V&a#=o3{3BI&Os!m5BY#F_&aWm1X35nE=)dBs`mw)J&9a1gPt&NiQb7>LmS)0N zZn-=L6svWrK+HYA-~jZ5RLXF~H#({|$JA@lGY~}e+ybo!)%DZc&Z)^kTkyRk>Wg2M zJTSlnX=&ff-5&WGQrB&dJf;1@Z43QdwmU@E)kSzi4fldeMEA6eS}&E~a)uA;fEY5a z2AJy)c_EhR72$nK@3IQ&luToU^v$wlyvq8sQS3T6_9VaCN=GS9VjM3(T0Ww%7~=!)Wa1@j;#{0;zOU>XnH;yB_cPj9<<6 zH!_B-7ctfu=6VE8IeqiNVLXT@somgE5OFC>ljavcV?p-D?lwf^nunWl3zln_dvyD>ED6sYVc?Lj?Tw&L0-f4vBeQ?rE)4p6-L)m z5>zw>*wj>%=F-XAY!o*UFYIR=|*&LxBGu1hw5xc^>BtX`W6n(mx2P zQ}Hx?SiDMGM!nNBo9yd0N0oJ0!CCFF&ftZbJipMPZFDUb1TS zcsPXYC?#Y^z%Y)kl^}al*Oo3vHjxzBpWqBPoW;%wsIgMETJ5>t8d)>5-+*(vpMFM-R{n zRIL&z&XB?rZ*cgtvShH!J$lNuI}&LgFJ<^MC^hU!m-83zK(d#;!?JVnJI$A-d2^DcTU;|T#n zF%EJ5`^U_a%c68DbMoJvoD>{xUt211Myj4=m1D%eTXz0k-$f@YZI~~0%T{LBLVgB5 zlip$MrRo<6Ek+B(Bps5JpU^B8U}M20rLeqT8b((cn5Hn+YH&pkQ-Z?IyE}GAokd=^ zZd=B{X&^ejd}g^Ux*oxlAs4(#%^^d5fr+|$izCz;l8E=fn|u&Q_hzZE>xajh{mq;K?Gt(uK$$7A)DL5RY&SA1 z?y3o^M^h^1mbp|9{f4YJT3POz3E6;s&cr(O=fSgmP8+b=&HuZmH3Q&I-qaW3`PL;#^;0@y1#FauD!r*sppUPUY zf`HErcz@7X6K&EW$K0qGO*96;akwz0cNQ~^*v_HOm=*5Ze5dsoc-W?8?iP6#yC7#j zB!<~HtYp~5_>0kTeCucGuD;AS{SZzqZn0romASjmnY?_gGOOh%!~W-O zPv8VW(8r)R?Rn^4M)N+t)&iWGaIF;S>X}1jiG)`KnO1CfxAbEn=V@W9{~#oM|J$as zE$Ee1*R!_5DUR2qz^!Sx39aOzg5 z-b|MKhx%DXF+Eo#b02F)<9vM;oCq&mm+Cr2Tp8#*DK}&57@gJuYU%dy!j~DgQD57l zcHUrSnrRgwJ##mobGGIgPPnpZZ{`Vr@JwCwt%NJ_??t}slf=Y1Bx}!cL$(iOhKNBx zV^XL@Cl|k*P-m9^2Z8?cU4w-m<#zb(+rEp_HZg#MbJn(O&dU7p&Td?N-*n&`cuQp^ z{U#J=(_J&G24|3CV&oJDDJ4D0eZDjsxqUmVd=?h66FZY7mXMUK5$V@gg*N-%5{Hz1 zl=n0)@a_5&&spc4Wx-l|zymRt+MqJRwoq}|hGpE;L7$?#OqZBB%Zord-`=&8fUA>J zO4ShG-Pd8FS-;L69d3#r__GqfEI<6BHg#+ZyrEb*9c{6eV_;uC{7GpFC=FpPu2}HA zD?v!aCAMI`vWR$U0*Wb1UX{6A3&Uicf>ykp9MGqEL$6@SLX#_dA%PV_}{L+wx4&f z4})a8Xy7(K&FMDXRWARPQme+Sy$=Kt(veMIt{N$eb65bo4-|FjYy9RvF%94b7zpx0 zvV6+JiI>1b8R5d~)be1KT0Hk8o&H|}P8pmd*j$j3X^Q7RhpST>S-ez^=3^1|%i;CDak zVwwn0BEN{h08=j+JRJ;7(B9x>HM@7ujGogwN4RNAI>629>=g~GWP;`C^63+2 z>y^UfW138p#}zYd-}z_1_ET&FY7a9X1Xs%E8eJv4 zQwcxu(uC|ArlFBKCr$%R?r7wX=znRKvowvu-&FC_-82p_ud-!fb)`_|0vLJ3phLXZ zHJ+>yLTvu)-c4rSs=7x6e{{VBsF%qhk&9iMJxfe#wv)ofK}18@ABL6yITh1n0*5i+ z&|i#>337m~$#O9bskH=NmPcTH`5?vNI#mBi^eN-3!+gFsrR{q%kOwl8U4JEB|8>R; zkCJwqCKH*_(LzOi&jNjRQ*xNGL(N|Iin{>WRGB@7)1V%j(mP*p%8Ufw|CEGfH}cdJ zHbHkMTm%dk2pfO#*J{hmd=(_DU**gsf!ws%Akz~qb4(!Ft+V^K97OXJsbnTjWlk$) zCvb{JEIBNVu=DXw{A|k!J+_$Q63tZuwAt z>lt2hK7Dy;JoTV+C(jD21q7no>cDI!ssmDjPrg;|cj^H5lS3>%*+ZQ=a4#y%EFfO< zaxrK&%_cAv?yQa052g_ga#Zvo3)xlPr=e@=q5SqUk}+0oMa&F3t&~uM`dn?uj#h(H zC-HsemW8G6yqvO}Q@iev3b(;potAkNYC_C0;8DpHt5c*$4J9AlG0))M0P3b53nGyf zY|_m2YoGWCJZh$%z78e_DB5F9J@!R=_F1~ltLN6txF!Kw6EvRDxT%;P*lo`#7*-Ez zB+uAfT0kyvaLa+o%&F9OS|k6Me`mK9_Ko6Nmdx5kaN|A4-?a}7KaYKisH|o}Wwlq4 z#?lj(4CBbCNRdEl!45BC0XgG;tXYk z?Bv4V>6-h>v?0iBrL%ASpjeG0NT{%+QFj~(#?YsjrkMR~eMPZv#{=Tcz6P>S zud-lqC1Bch1Xz8em5Ofln~vCCd)8Hp(9}Q-su?_kcAPs6x9v<2R0t&gWR9ywGqHAlQmi{hG zcz5-=Y^zboyVKxt0`+|HWuLA-BwD?n;0y;*x;=R$VwyA=JSR~v*pS~?FY=EL!bBE1unoFzYI4MsrNU>I+1+}f|; zu@j5kc~6+WUTyaseU@5=X@*}WDk#Y1cFDzUHoY}_(5V!0fO{TrDttF9X)hI`+eL=K z9Fs<>tQh4#;ECi!B|{WEbOQp{)aB57RDOIfFb^Q-Z&bK;2!NcStKv_W2!3a}^S^x6 zQ{hb8`C>jL!*L!~WqkDyVd5tXSpIVm04f1R(KlJuTiGU8wJiRbgdg|rd0FU7rCsr1 zCzX%Y=hm0X&C%a+J8Y)yuVkM}vzHC#em|)ot`W^7v2Tg;jbfjNs(&K&mHM9TvQDRo zk%`osu8V)JJ#+vy#hr-VDYOA_#=v-W#J&xnxC~r?;$kZ}S!#Ss9uNG`u)XxmuJr3; z7v=3|z?fh1l`;;9i}Od`f%8n+n<)T-q`{bZqCK8ST#hBDqI}=(tUq+nxGo73vQi?j z!Jstfc-xef!MevAw@D_=P?^7b 
zrSqWZVU1=i>n|glY-g4G!w*}Lo(uYARGQy9=7ctLuIDR;o`qHZ9}4&7n74mT%+7#O zdU0em%gl1cb{Jly+2YDJk&p{9f`QF~R=IXol9@5G_?>am^{YvJi*=%fCtB@%{n(fY zFPzSuH!b*v$^DA)EL>Ee1|LqZAPm`1+M(ERlf{6dr9;JR0{zbetly7yWMN%sMi-|Y z)hfUBjD_X2ytN9x|IVZAmE0kEt+d&b7uoXmLmI{j!wnU2Vt$80^y7Qze3}qL^V6F0 zqfI9-H)-Q(+P<%GtfT8q3DGQUx=z7!_U#Kz8IFGtaNET0ej?nhH`IK)%CmUgvn60v z#-G<}(0wiofA;h;_MPzq-7wzH{|eBADtOa=tov|B4ucc!0-gpY9hL}I96t^xBEI*o z)G4+aVEq7s#NN{t3i03ui(gUV+%+~hxMFdszvY(Y$TgQZ$h(=|T-Y`6rI>`9+gbl5 zQHM&4`~P8j+)i&6E?j{b$L&OZH}?A1$x!$ymRqUv85oTj;I(}k^c5?QXDbQV;N|Hi77`tme5h`sb z8c^eE=xDjySr;BzntZ6z2ezz_)F#+YDi-5xp;yo;ZQfTOqV$8~wNK(+b@wnst*FiK z^{lEWCYYLQkS?@$;`-q1|2qRfEhr!bUk=)~be1?l$^!C<>Rn)*; z35(#fm70fOPLjLvo@&N`ZIT`WQG$yky=-2hX2noW^?{!?A_=!M9fT}`cXe4KPz#oq34G6ja=Q(4|n^&ZjHFM zKNxo_Ky0d%mU>dlng5e%7*149$3;|nVAflg-h`6Gfv%mV*MbB)irGq7lMba&uvV!a zg0kABqil}!U#tF5nejDenelgUPEsW@5G* zhC(}Y#^?bomyp%i4yiZR2aPk!3PB=oHn_+)H(GMT_mf@N1-cA8#mtZ?AZ>vinJ zVgTk&6mJnUakAwnzDwyzHLr+!1fj+&lyQ?X2Ib+sw@^mV+5EC=ZpxP|TE2?ZZP#hN zE!o5-B-x=%s=m12vpaHv&6t6H!U^Kr?$hL3vI$URfg{`(rY3*o)K2%0u>xNr6fjJH zS09B*Xuhr24kw<+lRLwz^2%soxi++drVtJbD&&kq|7v?_JFCbAu8WA~jHaW0>de8V zuyUUunIaaNFGLLFu+F?Px(@kz;O zHwQ=*&ub6x<3Mb6c3&S3eOk526BE5!`O{gIp2t~kxIOYBwh$?k(G?Vng)5SBzJtLo zNg_V^*f^-JZfuk?MC~om$l+;pG@-^7<@)=Q_-fv$8*DFE8@W{_!d_a*{tyEUZv*{9 zDX&Fhioq&tw4d`NVL~V*RcUu0jq?v^wF?TX4T%U-V4Z0|eY#rH78)6lGSxSIp!`} zOEv%dwD&yv3So`IovM>;mzBh}#9y-C_Zsp`#vFXVx>by}*5!)j$r+(`8Zp$E$sqtf zeVd?e&$l}|*gF2+IqhI zoq68T=d|(Y98Qwt@vrEPU0dGzcDjQHiPVIV_~QF9`x3Q^9c_pDYSW~Kt2MoV z2Ws*92%ea;)n+GK!rT$d!tW#`Q`(R+hGCFe3w2|wK`l}dMj}yB6O&t(ElzIVSzOm=waBA zBFuWo98N#U$TWxME*)d1ZxPDru^q9?^CZ7&f8&%^zZIenf@Oh`V+MVhE0n+L@Y*Jl zGpiXfOa~M%V=FyVh(ncHjU3ZJ7m3kk&I(4Yh-t~_J%es?h z%lg<;z$3V>6`YCI{sOV5!8gT3!?4*b1wo+W@cN~dbM=2ayeSwNy&WIIwlj_hHx2hH z3c~muN*#8fmD@DZ%a|J=67#OvD2HO_$ak=-`NwL8R8qIb1tgj@j768W|Wn(+N&+&DPN8V^of?DdG*q)>22z3kST@y8%bZ=>R}kJ4i|U zgq8v*YjZ+@U)Q26Pw0wJ>AQs7HM?D6-?zU#o|z^$M{}>t*R8@siRc7VT|x#PS;;^s3an!pr07cA6w|mAp|n_PE;9Ejlr) z_2q%r2)h05QtzhtI9%TCsr8+DA~^*NSG!%0ny4)!K@B!4*n9tsKDm*3w1W!m_1@@? zR_9TxNWM(w@5RSzf5z^;SI7jM_HG$&s@MhtO&30G+wG7`$Tmz#6$@363gFS$!(Z{( z0;(u0P`!_?RlyvEvSLFfKkg1se;~|mYCE+FLREOEC`#VN<7$t zVH8{NTFah+WS?CV9UDP-uR~Wl8EJBE zR>ooQYjC0m1$I$finZ3F1FP)2U7bivv zm&P)k9x2KuRsJli@JcvW*y@}#P68(+RJx8<`cnj5U0@T8NTs^-w$zmT>AOcni<{2% z0**_<%;Z@&-jQ-7^x#De?SFymMoi=_70M0zVvUz>XfpK93yF`wNhK%FfA3endy6x5f@2CNyx9CJ=2G zZwxy%$Q|yBFwxS~$1}imDXF9%g*CKU64q-=TKovG2COHs|3MytP@xWXWw*=ge_#m zkUI2KW_^*@nBYS{(IXcwY2a)>DvJr}Q1_BcTKg;esBNAl-*-k23>PjRb~~#Bv`rzi zAN8MJDi$sV@cn83P`>XXXKTpRf`l=f^C}=KbiYydlnA(4C%okh2EFa~w(1z{S zB0`k&Y7NjMMah=~dL-huoGXvEsV0tKKou0ILh~&Kn1BLeSi8;mS!!T>JMs@Um6@!S z9njHCw?2xT=C)JG-^Xbb$rVYyhJmT-AE5gjEw`R?0@-FdfiXtXJ8jr|deX(cMHPk| z5e-Uc>FN!Y6)d(k1X}zaY5(allI9*lKDh*m>p2?(yzsVU4>l1t_XupWRZc4!zEq~? 
z>TeF}>qn`JTyC8yF8-WvlQc7#E*I}OX1Bb$p>P;b7>+pN-%y>n#|ZwHN;UfoD4-00O0-3Ao=8dwG+QEbp; z{{*j5i=lDIU9AoeI4Tk2=G~%q46kaJMpn4|Vo1_6eEEZCxyB&fm(#P!Abi4^fjB9R zqBbvIB1UwUpmqt47;oP5spwx|VYLWSUF6ClEEYhJLJXkn974^w*T zWWCx(pyV{#aT$Z2k{xPh$1*cUX{(_^tB%tg#q8ME<|kMpEXbxZmi=8F}8Fl*5^?Af~c#LfOqn>W!!0a^*b_^Zlh12P)Wz~ zykn+Pu!l2b4>vx=6#83u9JQRt+!`J9roVe zN^WjJEzBQ|L*#cRx!;$!8$xx!IpZ0OVlKyzWHa9X?30IZy~@!bLPB65&oR6k!HplL zC??mGoWYICT{RAx^_6e0|3Z(|NDp~#FV(oW!SA2=zkRfkLoI;NzpI>Nt?7*M2hM@$ zP2y7D3$NFU5wJn0xb0-@q}-J?X2fpU|4di7JfvYy3{k-Zu}`>*MA?XlF!sLI$^7hI z)Z!8>ub zGwA_KOn6^T3w7x=erwC7Yu8+@mlsKF%uTvFfkQ0zQd31nG|lwG$4i3d@OwTMKZ-&# zGK6A$hW;Wt3cIB;2Eg4z$oo|JtZp^!@n3^m zrq=L*$8*%4>dC>M17HzXVuIgkipu|_kw@Be(RVzobA#SOjD3P<{KJnML7IFX5^Xq7aM@b*G%FY4!g_^Wx$El-Siz*!a7dR=Vplv_^T1yD*2|xS%6@MrcgvI7NP--jVpg9dJRlldq_JK(N zqJGb!t*~5nz%EkHgn!HE5AU~Gr4zBFR=`Um*{O0$JU1dL7YF@2a{fWUeqJwTsMuV4lh=8>W0>>-R6&gMwUz z=G@$_$RZ!YL@7F9`IbZfy-M<)^eGt*)2{RUX=o#EC}cD2q?tZsk61=WmO13ZR=Y;1 zQcw#rq#^_iDpkML%;x=-N28gEdev5p59tFlGucYU-Xt1Xx=6hXvucN!`cE{nRx73o zcx!$Pe8S^uJsLZnnAFlxG-X@9sLbf52rGx^TZT7VnJayysI4oI3aTrAb`oMiQ~3!t z0v7t`W%l&4ul@xh+`9Ut+(e6=6jV&}fc;sMnm^BNC4F+O5pmp6r|Va-xs|8=lmHesy8ZG4#*nLeE)>?Enm$Nr?V!sF5Hsx|hvWem18Ixj8t5qW&< zzY{ca-oN;Iwm~&dF@HLd&`HXkk8Q4#^>2d1zpWehE2jzq)^8aD_ve54X*P1LdmHfU zWTbEGbqn;>xf;dT0G>qrWeIb~fIh{kY)#vLZp9wktvti+F?syYn!N*QdJJkU)jtS5 zPv4x(n-zi4GvGwI?H%m$e*{;V@Od5QhhA-QwUFC+84HwWcf}wkfFYZ*9Dfj z7!eBBgpwa5qVwQZVXEvT41c8KbEtEuoJ?~c^%m(2OP1UBk+D>Vy6MuoX%oC|O3u-Oxdz zyAcxeqj3lalv3(VZ zSa=#MTzM%%dxc`gLd2^{b|y{E+V;`x3RIu@OVv?=W~4WoCEfZl|HsHAnBRq{cCeWs zL*g)L5Ygc{*^`=E4G8rg3;Dw(HAcvZ@WoY+CH}+r=bwEB{$kyz=FDi^g-iYkW>fL% zYv<}>-o%As?Vo#+g3DT6TC0+~`YP9+i)Ppy%AHKc+;6gur3K0YHdlB&Ag$!mp=J$o z`NI#g!db817c!!MxaooGWKO-x)dpo8las=g3#8R?Jo5B$qO&jYhx8QeqV#B!a~s~6 zXWV|e80LNWi@z`b+r7Leng=;dv4%2E8-DuADYJDG(2+qDO`fYWW9WvGi}!Xd z%PqsN8jiu>vj2^;w+_mq3Eo9<3ju;dg1fs02=4Cg?j9_-ySoGk?jGFTg7f0;?w-4R z-}gKBRNZ_2I91dZwKLs2Gw<%qGu_iqH!rcIx{JvhbVNjva-bR|$dTC(cEh|{xYE+` zPW(yjm1OrHcgYvQ{?oI-hD(%iFic#FQut9cRd*f3Us4pr%$J{LiKY z|4hrBWQNvG=e@G}?$l5h2&{jUA=LzuPlQiLmXzfCe|(x=QD>*J^=iGRyXgg^gWRZHxV*(Z+aZvg&LNdQ?2;zkIyyC~~mRL^H_K|5()^O6> zVTGd9>f=~u-v$=#QZk|S0|M-dYJa~|Ov$<2FQ@jJ)uq~JQhyDaa2XwWHk8)vo z{c%aO29Gm*<(U@S5FpSIBDcpdg-O?L?t})4M4uX6?e0&xBx$ZbCEa>;wTh!|Q z*M&bcIA`WzSnJ}`y>3y}R$3kZ?2W;bZW*V~IsTyu5)Q}|;d-4YtYI?AbKaPgzjlAu zZ(gU0hO+9_FU1(OQg1jmJ<1tIQMX)&uqP$IHz~U+XJ|MoJnWa9yL%n! 
zu0La1XM5W>;DtM-iEQi=k~C%9R=2s^Pc4ZhoG||jhO~kQKS9)xx&Q0x=M~6KFN7XH za37b~juSecJOq+ca3wiwfM&pXFDvjSufCFbxhjHmzxy7y-|jcB|A2k~SH2gA{S5_n zH3oM2j4~wL3u>b_X3_vnA=6&wo2k(*wNP?M6vjP_zE53#nl^zTWT$q%z9|xpYn!}-`GxVk)3pho>H`Mm(puW(|B+%B}bcUt^`&1kW(L+k)($(>&`!!37xWy>z+0y?1MX)g990n%&S% z3F@n-=r(*!-7N>(1SDBQr6#p6tLx?R>;5II2Q0>Zcb{ADZ$i;JjdPeZd#axw6 zY^h7uY@~2lIzW9`yZc?_cjP^@kOG%O>ERPP=ghb;^9gN#c>pfzCFs72Eox|72FLKW z874t}73B{Rs7g!TlRl(=*?hu5nJX@uP3N0n2=4b{vekriNWwwiHdl{KE0R>4G>HCD z%8!^eoMhF4_29OK9$LpV#TqyW{l`@BNHtt^zlmA2o0!;j?5-LWQr9_I$SeQV*q|XT^n1ms^@6s3vLc^(m%^9%sk7;RtAY{_ zCXNz@3k>I+p=~X%19ZSX$g7#s=UNga=lWM@V$Wa1^mVNfUBe%gWjxXone z`NU(T#_Crm&4q(M^Bz^*21Hd)jf0)c(2EMySEZ3)E-)=W0QlguX__N9UftW9wdTvH zEf5z}ZWHAbfWU6<^Sc8849iI4oI8o%!gRqHN4g@*3n zyB($@;i#`7MUpk4YSm1l>XRU3r9jUnFGk~hPTDkza5bCz>??~A8~X))7KMHwJ;9=w zT_1s6b4aViH9<)q%*iCr)Bjk)TI{1u_(k2;YTB)+K8c7iDNGO>)W_O=4vG6=&mBGH zi6st+rXkPNZ}cM@Ez`^B0!oH$v=7ZDuNtXY;fnD?eQAAJ6xmbssj{?!)+ImLBLt~o zNB>ylRbm>+qyu~H)s$OP!?l)a?aN8c_%`h`CFcw;oc6wJ#EO9WA__rU11g-XZdcMq zssQ$WHx&oR(WiWS93y;I7+5x_4>eIi7HsrJ<1&arHlp9f9 zOlfk6*4`naM8QN`(QqVr*shG#HZZ}`?j?DN4X$*>sUyhRd6XYI;cB*i;?f@!XIJs& z{a}o=Q|>B=%7TJNu%!d#sM^v!=dz6>)|Su=^nQSu0;~e*=~R+xMoT4;KW|5feVbIb zzJ{k-*mtlvZf3>&G3qv@7wyP{D@Ebo+ReBXHGrN--wT#VOxoJKd=koVx=+nE2r9Hq z+B>k6{}6ksqa}Vu4S0GUq6QJIZG%L=*viKz2x&rxalDVWDy^gSqi=SGDCdUnscbz~ z3a4WZZ9K)SmdfT7JsgOc`iZ_TVp!~ZFAnpLYnbcq>oOmyZJMKprh8nTT>mJ`QB==| zG~RKjKq`uXp;x)Og)~mZEYkR*SuKstDLS}eW}1uTkoDCKf-K}I(82uq52ZSZM>5xi z?3-@R)&3Nw(| zUXQ|GCNkvbTUTw*n&)x847rWaOX*~RxCY01EOlu^6uc}=9BN!@>H2l5WZc{!V!0dc zh}F;m2TlqWMVjuhXdP<=$0$SG)Lx)UOE1C(ogJ`MbdxD5)T~9JTCpJNF1m?I<&(mU zF)uda8`!OWd+5-Q{IVp(;!peZhx37yA)o{48qM7sc$%+a_sn@9Ipeg=n2^&GQ@)~WQsKPBHStOuJn~6$Q*lX8YAMqLWJsW zm4sSau-@kn8$fFNus`?_P$RccIGdmUm*Ge)Zs!* z=In%=$%vHeRA4v>7b>dygABd!;%Wi>mf9=4A3IH>y#?pr#OiG#D`4FHi&V(lV&c02 zU|2eWWbmi+$!K^DS=>x97qJwTt``FuW8+FogSK%n9&dQXICCQ%9!XpNZj>o#g3yTV#o`WE8_C ziB8{x4en}QV-)rp;TJ)Bx9i6%gtjN~*0wn1YBoQY zJ`$G9f2KMX?KN*GLo7izf8ys_?k7Xc#a56u%c9!ITo}o~EH^9jjvZAU^Cq%uB|r|1 z#-0OCiPl%cHtfb9=9CbOFD2>}Ch2T`&i*tjcjOr+P~c!qW?v%P4lXL8gkIdGvF)bJTtu(@vFsNHs&lmC`Sy8zja&P1*>_Vh# ztAYTp(rmn~ZTK6YQ6=pPcg{L+lu%Bqj<*g%iZ#r%eyIjwTZV8z4o^3Q4#a5gV3NU8 zwJ&uQ>JDzEG}0Gw0YL^re-@2Wc5jO;O_C>d0uQ@>%64k$!6pG zFH_$X*jW|gX685G!;%#Mk}H<`hX_aNca0`@JJPR>O#QX>)9KE0D5?WUxJ;}8(wu2w zwHeGvM`}weUCT8r5au#wEB*B_2#U-BHV?&vsu?b22O~;+xW8K<+bp1e z?_iGMW`cu79CTt}AI;b{d$~)R^>R?`-n&gbI}ClqVda4U|2*YG%{Es&=kair*cn{O z=X;LJYs&?mKwjj>Nxb43HS~4B`OK75$!>{*N;Y}0Sv@AXYPnLqT64;c^b1F?8Uepc z1h_`nDVfz|Ob?Aoulb69vyrGp>SR*0`AT4`mh8Sh?^iZ(g=ia5eLsRowRef{Q3Oqy zzD}ZIzWgLBm_b7{!RV%GkvGe(Q3F!_&Sy@8MblS*nuV>sxC#tK=^`YziJq#?Tp<<8 zF_-g~=`F#azA*H15CGlCo^?r16b$Gp@_1o!xPeuldUo&G0@dmxZbYB`X@;`&;wm)a zqtlYyCf-zdo#^u5rKf#P6g5LmS7q6ntGAUn!@$`~^PM`Gb%Et>kpAQhpx=+$Kfu>m zUDs_}Ov|w$IpwYAo^7To1uWs9zL2Yq4?q5x$6lInn*KAT$jes$V7h&PbeVtm>{837 z-$msgVp^pY9p(C&UoGCg^Ig+C&Qc{%IZlyX<$o#{f2OUd857LJZnWHOJQ?(_hjtKy zzvu@vuhI4wP&A^`b7vOQ7=7U02kU`~BcUp-tDuS2N~29G(x?3vyu#gezyTXrjxifv zq>(+;Ck}#W~#7>CLhEf=a7~ z&NN1%?+RqfD+^7(AOZ?B!+g>##^7iI{S_Z(TwaN3HLGfy6(F{|uWqf~%VkBa&5yx! z#4TLnranU8rQasO*1f!bms+ygs7>V!WF5h$7!UXl32ai$*K!BN#nORW z8xqaKif}-w>N6C}2+fk%Fbio{3+&NEap+AG_p8mJFV?T|iFN=!zN&nYDDKX&Rufio z!5DZ?HZ1w5`q7V;-LQ!rtU5m8F_@z*wyaVsGarSXT+rml8a{k71j;J^-!iKe0G!)c zl~+*#PB8&3Y}LD}W~L!!D=})hFYS7)JLB;98Hl^lem~gDWF0&7xNY&{d^F9;Pv@n*PJLDZd3A@d-UX-;? 
zB6f)rF(udW3~f1TBP{wiHn+nJy?phzS2hgrA z+?dxCeV_X7Lc#AAD`>WP58L^*i+~4nRdGkGl~pZW-Qd21tLNMv78i6L?ke_zOko^ zCJNv{y6jlB+KXkkup`dDIaXs0_!riD+qeA#>*vM^PXNa?mcg?}=fB#2f)4#;?chbGi3MewsXaQ;fmgKJB+gV4gvNHh9F>>m6^YQr5~ zaq(|iXr52#TWhj!*qioUipx{usQSbR!K%E) zi_OUT>F}d^hD(;Cp&N=Z@UnC__fQIgE7(yn!LHu~V^GE0iBEPCH~~8#025{V+gvpvko>0BZ#O6WQUwQvz?wrmHj)B*rQR)P9?@QbAnMH8dx&WMl3T zq|pbYX>;vC++=u3Eu*E84?lMg2Ys8^m$ZUI&4D)TzW-SLVPFXQbvlIA%E*W(-U0Y% zx$^!91M-Lhg1t(o7=^Nk12VLwF2r+=&NP9{mJ%IfHJd}&l?N8&?VEY+q?sb9$)!fY zlznvH-lZ|oiT_91CBVFv-YHTFDu}k$#<~BG$Ls}?R)!5-w=`>$9(=R?^zZ@WW`{+$ zq^e9YCh7C2h=&ArtyH}@jE4>7mOpHHeFXcyX&(dgkK$e_7H3{c;UU##Yg&OAT>Y_N zX6Vg=s6R$M&XOoG@}&@wl(KW5c_8+YgiMo#pQ0?=00(Ru9hvvHQtj|#z!^?L_Z}R` z?(=8m&OC*Yt=G=QB(USG3>P#O!45v#9uy@=No zcH?6d4BNltcF`dVWh-gyz_}}|)C9l<{vIa_xWLUKy zqB+VA#Ehbjo<1#ZdvpZ4{$JkoYp9`3P!K?f!6mavX+aH0M#B+2`K>76K*7evwt%jU zY_R~=A5_!{J7VxFBd*B0ll7UVLw?M6#h=9=v1J2$VitqDrwwu>343erTy zHh|f&sAM%%!ogI$`_sj&gOf^bi0SD@f&V(Q9KQnx|`nR!){qNmr>-Ex+#r)o|2T`{wSxfcu7{i>eV*zDC_CpFC7f=nP#EP zpm1)x7=wi2&>?<$9LF=L%-Qui6_80!s~~X_k5>9cAoY*@`87<&Lh{idFG1V@6>JL| z!0G-XvI3@_<`NOp-eFQ2fvU}8OwhLGFX5T~V|O)ZSeq%k-6~BsxXx_%AComGlA8?i=-EXh!5 zNT3t&U?aSm2c1g5xWnX($-PQ+A(IMZ)|OKNn9!*PP7^ZeQ@(0~+(Fdl*Pv3d)Q%~7{-Sbs z9{0`)dR7I$8Sx!>iFg5aD;ky#LJU{oQE^@4m}8j`xs3f^5vlHnFZWR#w?xb%Y?78RIUHJV!BHDn+mbwYP=4Lh( zQ3kLJ_gS%m{(Tq$9ht&FdijP4i0}>e*#1_zxc3vid(ho|X}iY#X3MSJpTG`=0=5?M zs!(|FV(UG0K~0=d zF%gb66f~xj&1PO@4K$z3Z;zqtPXk;|)v8D@y@97P@brh5_ci7P6-`N-yslMg$xb?c z;Kz!0>-_NEWWy14O)DMl-h&bUJJvdS-&9OCagbbAuDN#YN=aD-%O@gG3Dx9x%S7UX zHph2;^z8#NS`!$vi+B7Jh+1rpAUCTTP2b=7eGi)NWB2h?O~t|uo|1qH=Y`?FZmdwR z4KgF>8}f~|fV28hiw}jD9}Dip?@5t{Ey5yirZ0^Aw8rHrAciYFJ*&_r_7&4c)DY%a zwbJx;1b1pN90_f`)GyvzDD5Y%@@eSY6-1}3!(_EG9BSnIlU$K7lUi1)Wn82ncL@t=! zpl-|B_0q3QV1)h=)*Tm~WvNgSQLk1wCV6iGS%2eEMi3#AMR4xx_o)7h7GljA*wmDN z8wVUE*J=Lp72F`Sk$d8qMPjv^K9t5xbrt{8@y;Jyek~by?0(`?tG^bo53A(j-P#?8 zfXMX$DD_nUs%dU=mgjaLNzXIJbNp*dQHo9Ev1z!|XI@~|rvYaD)pUxnkG$MvL#K`A zIh{avV9T@)m(5d19do#{sNm{zXk3Z7Z~an{k=A(7*`H5tG?^&D{`@jT4*4uFoz zZuj5k_|A96QvOBciLl5!OWmJr(xaqd##c#~ZXV}j@ zMrcG*pBJf}c4Sjn4D!p&BTLgQ)Qp-4a@&H>C_KP$_ZE56IPJQtIdJ z|DNAaOW+Zw!tCHZ@mBUl^-|gL`>tqj%R{^AUoe$my5$PGsV~!1B;40VC3oq)W-x}K z=_=ewmd)H4y=Xkf+G-wO^(Z+?Fq9~;PS%fVy2t*E1*?k2(jGJwMmRe%8D*HE5-BVx z$g^6z@J*{5Z;WVPjT9-Ks!<64HY^H_3XJ56m86m%;D2GOJx&KGIR$_NyZliD%t}TNGFeDMKhcZsR zv?JmZZ3Lz4;amF%fB1RJjN?(@icmzl$IhI{{XHr#^HE)dD-s3p7;n4 z^CmGGOs+;hLOGEFbR$6kTQbudMT+wj(ALof_D?&`ULfl9x)V2C`v?D0O9WZoX1qFG zNX-dSkFON=wsQVATF1G(lTZnzIb4jljd6P`FN@%DW}4DBEk*M3fjs^e52g)Q#h<|+ zrkt^homzdk{LesHyOph8g|~=GT2PMd0BO0Fc^gy}cyhzGEJm2SOx( zbTx#NWB)^c^B7z^fPXmuW%$3A&v@rZP)I8)r zw*|Z{b~)T3@VVB+&tPkgyA8FzAeU;kK}R5U6;6)zkh74;Smn*dcjaOI z4hNS%*ojz%ae}G&1pQd#uCo*Bwr~8Y4cTbHzweHd8FZtGKFP*t&l{ur0h261CTE`_ zagx)D7qdKIBopfs67fg85-0shpiRApCVBC)?n+>Ll)?h}b9B~dkjs6_YHL{l$|{Hb zN*KA}iQk6#p{9#?Dtfxmy*R)h(c{9chE~%0BqCYy<*>Y7Q9_nn<%}rstEbqp`UWON zV`OSoJJ-k=ttw?M98EdZNZzaR^d9kTU+n!X7L1})oz-4 z!b*oSmY5|k8rpL}gauNY8_ze28E+bng(b)GNPJt+Wj)yfX_QF4d&19jnRx?Tp;MLX z;w)--K1cE7VX$I6-In;Zo;t1o^}Qs%mFOaStW^+xOg5WLd;8~UN`<7bL@^7NW z*@nmb!-B*D1(&361#VNKFvBcCS7>AgY{s6LbxE_up+ zlZ3YqwSU1B+syOPjN?Wg;o|v0wY+h==?+S$Dv3iImA}{!l&8F@D!fWzy1bpG&FWO$ zK`~X5Ttv3xz|yw-zO*%MCFp8KsaEBseRxnkcximb6|ia5?vD_q8P@$)xmic=w^Ca~ zY8_s z#W^p#+Dv{0IC$b>6D?nBFB3$@)fk9;*Wp*cC9fOyDH}>D2+pG`;p8|TjkUm7dTB+a zacN;_+j+Kq&IFOv5SZXU{7ctq*(-I;?Cz^jX zqp1;IZSR~eX_P^uj=`S`p{TmFLm%)oW=$#FroyfYGzkYn@PIF-gT_Z20U zG4y2N|7nbk8dkjUI9~!*rOJbWF`(7-M0IJ|{tyR;#6!TKI+)&dhgry9FLDVJqmDl@ z`w0ZsA;1QtqU4(+B|C<}&vP4f{R<|-M`z~qDOY#D`|9zja&Y}45nE@#9?doz=oM;l 
zCgS0^QpwMh6&>TCHRpvnZ~!EWA8MpRC%z#@X(F1<(P3i_zNr0An6>%mobs#jX}LxE7R(Rb4l&SV(h9<`1-TMk@p-BdGWD@z6FQj-BcTfFPjtrie7HO1g%b2t~-YF+pl zpqbY;3&t^v+uO}KZt`9@xI3C$Hu+p9T>EzCl#TS*^Xy==yKJAkT;}exF~bfVU2C0u zVe>IjF^P~nZLukT&qMMbI9vn~r^HTw)z@d|`vTHS85~5i&Kcd_4q=zuy}azrptyQ? z*c?xI^zsray0b=|qIG@9dijsWeDr@9=2K47u|vbfDWb!2H`^2b!1e^f{}G|31`DJ- zQ^QqD@V~O|oMdWTIh3^(8kRgK?r*_B&I`xf5Ont9sP%w*APhQKP~*83M0jJ=SU)z2 za6A)pp5e0$EiFA0!&T})T>I%h&%T^Ry$)eq;4Q#p8zgj)7xWt?ivY>GYIIxQUoBrx z`+5$J=Aqlnry`new3%#1__36@v<&3126t*5vIYQ+HvPK4V6&qrBh5=kCJ~&-TD=hz zKR*V+B@%~=qX)n#Ji*~Zy`yAc1$uB{XdPJ82w0>+9ph=X5SUyUq_{lB{=fK!-(7-GifNQaT$Y9m) z@|g=cXY-{MmzfvkRbc658T(J%1Zz|>SaM)p&$F-(0u2X%>NreG_5-xujkpCC^vw3Z zNYjoLN>uCex3sBP`eX;Jdt?z$uw|qnF1;t;@_DqHBt5g?&6GtH^4bdQn_7ddKA;I~*0n5+u#UAn}Rx~I0pAUmyzz(3dqrsdZE(gK5 z!QHlf@xa;~x{!QzIsaG1h>2V7b84QQ{1Lw(@14%sW6?UUG*hrOzYv*xU76UR>@9yW zkbOS>mb~FV80w3U9vUWn_LRA}h7;a?v{OD;`dA&919orWRC%{CQAkr{+~wLR7OKD* z>uxb|%XW<4zRM+(;o*zx50fOS4!6^ky&Q$_6FAjbJ2D%7t6tKC(!3p+q+4@3_OGKl zn8k%;D3Ix}7?{z@&BbDwYs|SFH&+>Lamc>s#NQz&kGBjx3E}Aw=)M%D$Nk3t4zz_V z2rp-EKX#JaE^l7|dMq=EWmucbp69HCoK^}U;wuUHdr?JnxC&F>7-Z@O(%RI?+;8Z( z(_(tM6p?m7AV_IgnaiI4d<RdS1PS0~AxxE5 zvF~s)yi1K=#+>3h6FL7qkXMxFvPcYbU|d>bW?q6X%9>~Vtimicf$-~`PZ^KJW`Av6 ze6sUxyxdBwz7_dkvITRjNMY9>de&<3S$AUy=QvY^KecLC^rZI_FUVw5QU+b#? zRfwDhnYC@-JncB^v@u~Bm*3BoLsFD9gEG_NOm$4ZT3B8qiS@Eq6(ivo=H^U>N_U!% zWsn-otTPgc{^bKOe$M&jf~3I}>3I(Nn$RO(u8{RSs;(z-N!^o%Hh(w`tLsK7^K{Xn zTET_ERJH3p)eVH<8>hAC5uvJn#-w23Oqe8Bl&V!vhn^3v5l<%QmB59B%@i~$G&mcl za;y#;7Wrs{31EUZO`n*+vz$@6%ZscT00Z!$wfN9=4)%PP+iiMy2P=(#!3h2VAlsW? zi>{Vmyxtf7e`W0c|737STBXuciJP=$bo`2(wQ%e(-TR~}QnvE(mMZ>5O^W{@AY38q zR?YqhRjDCewn39s$X1hN2^=huc2gaw7A;Y8P?%W3Ym#&1ZWIoQu?{W@7DxzB zrsI7uItF5wZB-_wA_&bOurO6-Ka=RMNRN!n)>T?mV&Kdtg*sT?)}AJqoc@c|bO5Zj zHKb_xRYPz9eM%tXGe}L`Ix>LOdbq#HbmeMA;wX*=WfN399OG_vLKlHda|=_0`}I(Xd>h`;Gc^jS6)U_ zPcoOi1XBb-1cTjolutPL3{LGwiJ_aN)HJ8((!xelTT;tOe1;6Tsb5#)x7$-5cf$*! zGh2a(NXKhNA5B^lRUnOe%5t)Cl&t%%^2Ll=I@%H8;QOT?V~Sko;Ha?0flC2-^C7F* zM^x3D;RkSz@j_ zf3W_7(WAbkTu)A$SbN!fiR%|uQKb1S;fXo^Q^toTmWJQcV3Lz^>P zy`ZQaJxxM7o}ti!s6a<)E=x#OSy8^xE9jc0?%b6!y&O{hwpY4WD3NIar`$L@voJjM z<&k91%UDe~9)W~uvyhttH3vWTLr`T!W>QrNBRH%}#{`pMRoQlw_nhZJfM^oyDS+Q6+vROGfq*2oI;Z#8(Tg9q}x9n&Sehn zav`7xl`u4cXge>r_)Opf zK)PYt>u%$C>5T`H1sIKifpn@7=G@ATjU|$st z=zjp-Q)cyJIu}+o8TF*t{9$F4eLWwqfVUF+6uEJ(x|X6J{$;3*2z^dXIZT41RB-oFuP;VYA-Ti zaU4VEV26?Ox{7)~fO_^NS>^Tj+WHRsI1MEMS4JR+JUz4L~m{jEDZ&OJ!OEx~s z73B}+MO0-vX@(2@>x=M&N%zILF~}H`yNejFVQBh{d*$ZWrH&$#T_e=3b1doQiC%5f zK9u74RB3~(Z_pYEN#Cj$-I?(M@s>)`<@0;eORka@mY_ zS)-mPgyr;g)oQC~7{U_w_<3Uc>0Q1zMzLYUOTk+ehK5vzEsfKv?COAhR{k_9mPl`% zFt@~dYnJS8kVGU3|EYKPpfYc7w1qNXjkH%}M{aV$OvWXI&1p#I$Jq_^XC0MCMjvUv zkWldFuiiZqutdWXTt7?YsU{eM3#;mwyr{x>Ks-E~mz&|H@%>qNQ~LPeu!YunBMlle zvF>xr$;x8nq8R&ola#%T;p*QF5ODWm-Kq6hlWL+{%EHYHN88a8hvSmeGV`KjTtsAb zE+hT9O_KrIJiJJht+Gz+%NgCFt!P}&xS1sMCrT*s(vcR}==4T0bO)uC=**J*?uaLa z?SC&JPYPSf@ZSuWioDhfc(=_GWeB-UwG^aV0^ zF*(nkm?vkWo`@%hElQH$7$j~_%4o{%XW;X{N_5-5Xp?SrRYFu&QBfAb!;ph@1S(E(ZLwBXn-2Yb288 zmgy#5)XeIAozA4fx{&NVC##|)?yqz=O?k7Qt<&sk_S9C7 z-oJYl9s@}jIFea1G9c4A;zZ zim8vSd;}DRnl{r6zEcbi`gh||lrWUr@3ogS34g)<9RKYDvqb@e0EdKtfPzJUgZ%&t z2Zam{0SWa96$}lX2pWZ$34>Wc(7Aq=kVL`2$)&DgdWMvdMPAXtaZbq4FRrF`3I>ym zm0w9eFt@Rr&A+dIbLT?XC@45SZ{U(#S;hEg@Aj@p!momW#H9Sf>aD*^U~u5zU?0Ch z{sp^`PRaOMEuwH)v%_|H)9G0ySMa>*@q=OQc;?XiUN=YTzUAgguYI9t`t_`xFXi0j z|4``_o7y0$Go8ckW}0oML(Ps&Y(Tatoh`u1krj;UH3j-e0gawu8?5 zC=Z6$ZQZE9U~d22S_f`%{C8`5z}T9q7F+8tIysBTWDOZP7e4f~GgxYnvx&HCyBB=};nr`}OjqZ?CaCPazXO~hQ^ua0c@1aah! 
zNaCgrkKq1-?Tt?V5ng|;{IMz_vw$+%w~Fl`Z<`h=a5t0;K`dRV*3`JK@`?1%NC$14 zc<$7~mI+~W&Dw|Jg%O6kBMIE>WJ%-xAAtSx;HfAnThwMveylk#T;lV~W=rG{(_k;jNiUZ7L$Pz>sbUW5JDOXg(+$U ze*d1UdQeeDe+la&_~EjfLUXT|>`QUs?v_8gf!?FIUSjLn@yX*TwZt&aqA^#4+?r&C zjRA78`NLw=60mw}C=fC!!^3SF#H{16|B5U-8wNuJMVK;Dq>n-=%Am%pQt3cSU{j=Z zhiPR$e|J%mq2n3tnr~$8+2g_@^DQ$x5}8fn-H5BoIDXC1<5+m+>#(c3`{A{&ck<+~ zlO`p^ian&vTft1k8~<^TP6Bs#-0xnLgUK!7~%{+$1 z!a{|o=u>N(r<1}Zd^tCQXDu5(?&5F@v|k#f6am#yr0(Cq5fN&)!xPw=5K8MKLj(v|CneAepBdZX_+u0ddUX?YZWTPsC^v5<7n(Lh{kt0 zcZhjBEsf@Q6TV2<<;;p1z4PhsBm-SO`y5~ee>=)fUcjK3>dp1m>^!Lc3ueKMUTLsa z9jI~^c|BV5NXt07J>4x?CxNY6h{3EX*d!~1=8be^YxL4yhKHcOt1-{%1LK;_DQn0Q zVEm=l#1n;KwDW>ZGI)d3ydc>nY{51yO@kCVjkyjH8>W+2Y^qIAqCU2ICECTf-YaJ3 z40+H-NMjSvoYz038*Jr9lB@F(4wQ>M`WwUT~ZcCmb)B6882babp3fmE!C9$N^?y? zKK6l=R4X!uEmiL-w>iy;&+hv8dFx=Z^6{dQ=K2r-LqbJOQCAWUfcJb#14PKuwIH7uo|iaBhDMx#DwN;B~beWyp!Dk z#_DFIFQ3;Z9$He9jS^Re_vCiHisY5bgcvPsqi6b3uo5HgDpq%A)RYHEV@Hsd45DgW zpde%4et55ntsSYMO2(xStsn}00W~itmypDhp-0K4(PTc0TDre%?mW2Y5!0!m@!1h# zFbaDr{8BfLUg%&hVBEgbq+-WT36AGizi2TITeque$?kn~0+-T*iDkt}Cclvqrjz8b(p~U1SMo`cz zoSYC2~n2VG%tg{n~L$bm}#!+m4Fv`9 zWZ2A-mDl|gY^R0bM#NkEcDCaU(}?G^hvCyOd*knz~tAN>U`Qxv|5s^bSv1+CUbw#X_A^W@6s?QeA7*X;P)WTBW~vnvI4q zI*%*~(odA%OWPQn__go;2>h#k!mN}!)Z{1e9h7m&Fnh0N$JtN682fywMbIr#Sw+4i zDe$X6#zHI|nm>s}7spePyW8>7j;}MSd}Ofk#k*^$s1gcUy1}6nC6;!?!;ULnS{=+n z?(kOIKHsepi&$#-b9v*CTO7_s?A8fao+L(9<3*~|x4S0i)(}`7x`>AZ7P+Z?b=C+ z)TLF|v35DAe%8&lQW>GzR*c(Nxx%0qvs|;7?^CcJnR6XxNLa$j*PXl7BSeuM_?hlG ziLRyt%Ic3Oa}_^(Xb_!}GzcmkRzYN7fu}HG655tn^wv2J7ItnDtEE3bP!&$9KUq0H zuRtJ+v2*Zs@Cq$1@rcS@_e9V%unSgA2Qu7LyGTgLNjIJiz$p^h?daH-9HSh4FV=~H zb-+H6y;%Pq`{Sv|9LuPCg->gSlw);!zmaSAk;PLh#rwC~Z5VL!XBxTc zko#A~VN!Ie(oJTqi25ouo|Mfru@14upIXUdC(zKBhQtajG8H;|d0KyTc&4Zr77`{o zKas~}e#D{NL*EZtX^{A8Jf}Mgxz2;2>4h7{O--c4XbI|+Ux^(!)7+k0r=@A+BBKNS z?zf8A7N&iF!7^W1eq|EOL?;q^uW-wQLH-6`f%(m?SNKDKR{56s=P7AT<1Mtrm5yYa zM$Xq@xd4?_=%b{9e{+J%1I_B(%@om)DjkNg#5XPY%I0kSX* z-*fvln6W(6p;THLTB3d9LLewSJw{R-PeeUEMWr8_urcnUUC!feA=wb}D~LgWL)@B| zGZOFYLwG0BviknxH_uCVizCaaLMmL;61rGTQjwi+5pTW^MsLN3KO7W)e9_cywrE%p zZFahP=zQ4yweu}!Rj&+i+68xZ@P$7erL4?IHQ|YG!zJohq_lSpuZiyrwr(z*+=SFw zW)~Dlz?Iz3Gokmc=S66LMR8`|SJ|%uXQR8}qPp@u2?TFBtBF+qP3@{{v9c>0VJ8 z_15rRhxVENc--xN=f9(-pBD$wDUw^4l71b4>x8Q{+V)q z(R$}lP;*lm5Y$i)_hm@zqZ>>4T_HJ>zQy~bUr?|qa zeP9b}n}d$&W9#WGAFq0g{ox}qo6+Esq@=(to;-qp}Nmg&qWQ{jQ7 zTW&ZQ6R1EedLmNL@G3}4O2M5=`*`o6Jw$j5*F)1SvB{gT9UHo0VjcEdi1OSX=Le;k zG?qej3e8j#an;39avIx1Vq1@Vzk-3gm4yCajI&c2D)E$uF`mp@*7$D3C?Bul5}T(( zv4o`UFm;pdvCQ!61z4$@$k2ZI(GO>04xgsD`m0PBWPe-8PWiJj9iC zRm+{w^txel$xi-|*FgvQmd&-qoU#;(aLa^|4c(t{u{~lt>cUE|pLZ#`kvo?siGk5E zaZUlQ#u8W8nqrc6GR#`$j3942Z+qN02;vu||Bot|Qc%q(-m}x?#P62%LSWtT`k!^j zrv$UwRu2i-7|E&p1xq~u68_*Wb@GDyxLjCutRuzT3r%^&Oe%MrEQoj0GxzZeSbJVV z`FS75RY5qbbYFRc!}{p1i~GpncZll!p5LJehkIt%4T#RXvmbpivA^W8i^%Q}wR`BU zsEh{QI_0-ZGH0wf1BssO0ha9hyXLQPvu$NN7#fKPR~RG0>Tdq;)YjO*+7#hN{@^dxRq^+gmRlAF`d zG!R@U0#Khum8a57V^_8Uwy;bonw23mLcz3;qWP~Sk6)%|4ejGA{9f?m{vXobI;ySi zYZs*jT1t^Zk>VC8PH_uRin}EQhvH6gEfgpY0fKvR2ojv)#T|-Mthl>-Px`*+-tTws zxZfG)j`K(M*kfmBt-03DTzfvxoNG?4Jjif*WWkJ#j5TF!s%(y{8~^9~Id+!NxU`6^ z#+e9wN9&KZGCI{md1DG86C3r*Wxa(+0u4e;=M@G2Cj08SW+RVbm;o5vh_cJJwAn{SK9o9#W;;$jc;< z0upAF7Yb+FA^dRo?kS(qQ*#-o--)?9mdpmCM0GDe#3zh#Z)hlCI(eGtNm$GsRsh64 zbjmVai^k|qvf|~+G*0GFDi+o@g#4IWV`)u$cyiqPpf-NHns+NuBYx_~5I={^sS9oj z@w?~wC7h`UdTJ|7AFprgeRO=bkuwxtcjhO*rpPwfx^Q}Q-Bj}^v3`xnL2t#;zZyHF zdnA4MY*;39=G@vAHG1c1bGN*WD7X4NMeZ#5+uzZ9U_UbJ2Fyn$<9vR)aAr4$E*FeR zkjwtuD@92O!mzPgWW-_OgxLcRE$L&X?gsh1e~HsbUkq7AD-=0HubkIBoa1ln;5`d+ zui4weJhGY@A$%NzujheQx^hvUztUyDZ5boZ*}Vj03`PR3Ik+XfG6W7 
z>`gMq61hqkb<3q0XnyqDLz>-p?|*EkD>c`N;@kM>MYW>B3n9huH1hR%G)aF|blKJ6 z6N9kQ3>GSkY%2%tTCNn6qP?4$1;KCtmUAqa&KC)q<39 zg~_f7=~HP08BH`mS~29EQcvrr9Ygzxu0bWx)aSQc?Hp+|VS6OxxMOB?YQw++t)Dq} zXY1(6afAP%DU7wpo;-+Yj(OO@>?w+%ySvA9eVqJ$lhyjx$?4$~B0`t`==#P<6g&w5 zKk~wi-AHMyxk1CITL}5P?L^izJkR=v!YkN-`Za9A5 z-fW+CkvTp15&P{;zpS;IVCokt2n*z5!L5wYsEp@WYG`Y z|5No1@&2H2QTWL~PF>-41&N}DHQtJTF}mvbp7)e0&c{?7cP_Hst=z}D5_j21h29Yv zdb;1+V}525<-H^WOnAo0_Inu-a(?V;M@D_X>-it@vt>K(HYeL(k<9(r`+RN#IS zxa#o8eudi9QcD;FCjWjkL3H8}{T3M_9GKwEW?V=@IOa!;?%d9j3v#eU28I2y-*A72 z1|6sWxVrTid~NZMjf`*d2W5A4 zroGh3c?KDAhXbhvl7K~hHu1k>kXo|^Lk;D}HexFA*fG?dmQy76R%t)X(9`EWp{byC58 zvZ~(GoS*Lm(*2&`NpXB$%s}?oINoDah4%8T)af;h!&~yX2nZAo*1~*- zWen5J=s+(cAuS3r=TU29R^Pa%0E07#41)#K-6U?_w`v+;OA-RL-urBYAj6ZJ5p|}< z?BEwqbUenj@(c;rVkL5M!{P(s{5wKdhQXv>ZNByU(!)$`sZkp<5()W6D%|ovQu(jr znOg*?9+n<;vDJZ~Hq4XS0&Za8b^OnR5Rtver4O#l+Ye^E?UN5&HhG_Nz0(->BU;Ad z?!D)87>|Y={czdK<3w=-dXw>sd2x?tHA3`4T%su(rKk~K-6`S>)#SfE{>rl=UWxj- z*M8!*`#Z6?{dNEZa zW-ANT>K_Kz3r`2Ouj}yXUO7^){wVsjL$x<|+el1}tB_fV!+9zEz1 z%fAynHh@iDrF$Z>x!QY(1OJ|tNY}lN#fd_Kh3UJQ{YQWDP`YHgX1{D-5-QMnU68~d z6g8pz3cs5qWT#7{e^8>^k8Nn8Mw7JYk#@4BE90Y+cgSf5<%S{EjdGcfsZI3y&vgkW zWER1_mK*myO3>p+j#Xg|tL%^${mIyD>vFzrTF-uoqX+&Ft zCp3vCDl#;t<$Njm5+EA0JsCk2XsZy_N_KVQ(gg;Jkb$u(HKyM0eaFQ)qz$ioTN(a? z%CB~+Qrk=Z1@VOly2i_UH{gZ7V%#M7`-t&~ikYcHzhuk2<0^Lf4ZnyQNF(YT41tlX zYw0kN{t_~yZ4B_nsy9$v$w{puY-lqD%MP>^J9W#%zDHH(P@>D;Bq7ZiDqN*YgctG< z+ssZ(RrQZeQ1R{d5V{{m0%t_|iguQa+O`0-IiWsI1M0ljv)0cOOk$gFZ}0PpV|*Mt z?jKln2NBO@_L$0XN_KK*UdTzQ?<~bhy-I~T1Ya&hw#C^Ko<^x)5TbY{s8PU!9(3W| zq6Ex7jkyV4Ks;k2NKFg9+V4LoaXBm0>khKN33nRFa=rzjRA}YwkY2AD7(Oi`J@)xM z&b#vFLECF6S}>=?pC|+42~(h~8M9G-By*{{w8U!ATzkR^XA3~Cw?}+Go>N1HOE>GC zokIlpVEp{Nv!>0TR!LhSwch%Aytn`EeWuSv^|;tf-jprg6bK|6eoCDo8K#2AKQ9|o z^eJ)V{gA`JA~GC+RsMBYJ{H=^Xz0~&4TDzlWlXkeyMFDMtC9@aYrpnLD^{fIZDsR> z@f2WnpS2AMmYEV^%T@h@GNoL8@o_W5j|9UnBI%B&%x!-@s~r)zI)3HzS)N-Am5PUI zXKH@%TgFF^+pYZ)m2%$PMdW))kXU6h=8Y6Ol?NXHHQu(Bbk(&(o#4^qGkHIK z_a`ILtu)5X{Jb|IVJheAY+Rf^IUW7B@@*Q;AY z+4k)Fz)^t^vP75HNU@&Ubd|@vvckB$N{kGjg+j$;dr(sxiIvz_v36 zkcs^<#_4V*ll6A-`X7`7#$@$=xxTD{hA-(dG+2mHh_$?@Vax7Y;freDpPEq|zTV?E z-pwRqzja)Ko2mHEqdcU=G!3%hwi|d@bu0Oh9%2tvj@P=JmCa?AM-1qAPpMcy(+X5) zn4+Ye%bO}4!$m@nyPV*R*Qu;{gL=qL-P)1yeQy+NG%n7EHmzlk`(38h6X zLElM5o7ZkmLQ#(HnWy!`#)o@)aVyyejM*DrwQ+%7@5?&sXxiuP@yQGBRTZKfx5O*i z7b*%?1#g$nUVq=dr=7cQ4DqA&+#4pYe6-)2&y0>gLtWiiaFKarL(rW5*1zfM_=7U= z0*z|wv!{Uq?ncGtoG38yNbJ()0-F+X6qwW6ly}9f7+))QqoaM&>*!sju?SFfjPb$3 zi?4Ib;ha+MA03p^Z0O`JdMX|MP9?k+*Gwp_k~w*&k&9I&y7?hI1iFw3%y{GGmb^fc zWVfm6X0^KRW>r<~R?1KRiKLVeOmt&^?uVGz6DndZ=8keV3AfjJQHIv{;|T!&te;4> zd?8U9Zv>s|r$(PJo2Nzm;tF|rr6`vDcj0o{vPAj*Nw@cEM=r+;X0?Q~3$xA8qdudE zr#Uf0Ch~bv2$DquM^NXewXRz*XQgUUpYYQH9YCg9bf*YU&hx{rern@KZb4(MuCYOE z05qBhfj2-HOWVwIUYWVn;q2@J<7rx=glVrU-FmPQW2>f^X|OVT+voh)7;XHqn+jtr zj22jLpS~jL8YT0sF%}h3peKP+^yTzPL9n9dQ`SO_!-wY_y|wg9F+o$pQcYOX+5mbS zms15Q&e6EX7Nr%DhVO7DjxLpSg9~iQIIN^B9-^}{Bm?y|^ZrdlFZCmjI$Eq>WG+aZ zSKOB&{3B1yS>gMbN#L~Ar+jzR=5lvOeVnxIh7oO(fd@B4_Zp$ zDzF9fQye`6+RLtYXS;b<14-9M+@iqYIK1^K5o~gk zU^?BfVkbz6ZAKw;ubiM=k8>W#vCJnrHCRzDlrOaX{NAV!@C#FsK2L`r$st#&!EA2# zmsx~jC#xB_A=Qt=PmcGNpT~|-x)p#}lHBxr=1#!jr(!*aO@5+ZPD1W^k@W@;Y;9|Z zKw7_(`^IY?QEVIp&bc#knTp6S-tgO$e0##dhVx280&phETY)TSBR?`*#A++Z__pZ- zydv59TVkdz$uqWkuH4lntJ>(e2c`sD-0ga=?~h!W$lO{9kv6n)3+&|4UvT;l+c*(l zB-tL@5wV8Vh?svrd52fu#J*wUQv3UkL_ZpR&hSZkNWI~>;}LGzdmQ!=M%~|A_uHT8 zL;AUL2j&fN*;jGTeapxr@hU0@J~*X>#5I4VxF4k%eBcq;VP*~W)A>$k$fx#sq}3+p zF3M*{>WcKHJmQe|OrLncX0wsw`^>P5p(7%7yLHWwckFY|f8G}I_cq)GoAaHb;%^QQ zMQa*=P;N0Q`z@IuzLk%--e>?p9P24AaWbvd_0yT0bN3^CFAf>si~plj-Xk>)8PpGn 
zXqQ10W_tH`!LP))I6gYjw=PNnBUn|!n5SI;`dVBSN18;`R3m%8$Ih9VnS0kqTd`oI z&R-*F!;4oI6q3&l|Br8!alr!xw-r<(H)GpK z15dg%-#5zmeM5s}EN9W(gEg#9ZQq}zq9@K+P8Z||n3`TwJr}4?_1xQH9&Mcj|}!_0$4wF(&oU6P*+YXS><>|=wxJoV6ety z*D-if1})d#io~h=5GpMDgK{tMka<>p44)&gUXTIhQZ7tz_KH&@;^nS|6@8*K>#6M4 zTX&P;l(2@k&5vUFZ@TgOJT+!wJ+LgAF_Z4edxY6oE8AG`{rEh5lXf2y_Q=Da{GV`p zLxs*geMed-k@_W*w7)mDarfU}_+|?Z3Z-j)PJ(lNkUdyS(6L}OKL3_G2XH4Ksw{S9LP@dslr`C1IajXy88!NnP@#6V| z;*OBcI=tAPcvxy7^=q2RRk`vyw|YD>h!R`;uY0ol0{a4l+jozzBpyDh+|+lTw?SMG zplTTsy5{Gf>ZwMHCQWU*Qx)9u<8Q{rJjEhXn^h+dionJA6O+me@wh9wJhK~HVao2z zEqb5dmdad*Cx0*GMNiBWvds@gpBJndX0=!bNu5PpimD?X@uO`2poA}&bvx?eOuOGF z5e~Yy+0+N!kv_oNTBlfj)JdI&Rp|NT@SZzHl=ibk2kDayGtCzyEJI4Iz8;Qo$~aXD zfYm!In)@g=Z{ZnCbMn$@o|NV<$eyqUX-mY$tlKTftQn<>AFqoPPk~u$5(if0|(w}Xnbg2f5N)88`>IJp2`7RnR z?Z%>AeW})H#%dS)YP`qXu(ec*8%H_GRih@NJKTC_@ys1hK&s!LMryyV={3B6Eq@`; z%H1T<9j|LTWuVHule2Gjfl@28=$-7Uf%Y)BT^C97>=_*&F||7?Vy@)wK%Fsb=_RRq zp*e;aV|Q2f-Ca8sdP6sh%*$z>1&OcwNv=rQQSZ6~0+dAA$p|4V|uHhXkfgvG{96)bIg@Sfa7TLV#qKSWK!a#EUn-{{#sL8DY zJTLsz_susuprJR~A|~naD;aL)Xb{D)>&@Ki$GXq z!=XH&aOohmu1j6-Jqrs_*D8_n&jys+ex*!*Q24Hqi^XBW?rQO4f*;AstSDlK{eSAd zzs5|7P<`8VZpjbC-lW&s?qkNq3Mq$H^hvWFWJ0u=Wo)S)*AQ>)u?ZIvFf*MqL#TFM zhIo1IP~JJy?c!JV77}QDkzG#fkZ0#%k}Kjy*~%5T(+dgwb`e*0<5xa+{9DTFob;HG z1K|>_x#%zr&d_s(loUcjgji<2-CGc9pTE2C@Xmg$D7>Fvxiu_lgp`CA-h%gJb5hHz zEUy&Jb>+z4b?sF-;PS^D`f3nr%?Z~JpBp+O(zaW-4Ee{9FMWrcy9}S-QddHnO&`vA zwySW*%){tnQ14|lPImKg*7ZgfPB_Co+Kuu91|kW|hMb9yc8~X|Am>RCD!Dzpiyu*} z6Y}kJP#zNDCQ5lYdcV+3eSw(!Zl*ZFn!N`gEX7IjTQQU*K%jM@$=iubwrC?KhC?+a z5h}InQ77=4mN7?;StqNTE8R_9DLqC1C;z^mOY1N^TnhRo2jjd-UFh;ty=efBcnGk` zeK4~{6mjIL+Mb`VSddc6THM5BF=uc>aoks}v!#v0B_uxH!^zgLwY&`|)JBG1mTL#! z=LAnnj`eB^Y0<>sF5lCr5DKRIp>Zv+-MrH+&*V$A$}mM>S<#I)Q@?+HNC@(0qR_`<=7 z?|$>S^d3GaN3e2Bwy!1flhEZN7&OV-$)okf2Mj0+ygYv0BWholeoSi#rO0*i%n%=8 z$Skl9l78SMFC9$1DYHSum1ko)byGK1Wo^^7XgO;PIV1f}e%{6NOnL5}1>PQTvzY4T zmffT-e3c|^D3=h;_&f`zn%|8maRL4xlq|5|3wFR$#o%D`Smt}iC!YilaAJc6u$J|D z2oA5}$)RxaJZ5#zjhLdQgn(40f_6a*_N6)Qp2+s~_rLkh+bXQB^>-}XW%8{x6%$<& zM}xMcR1?OQLL09C#9i41u0i==FltMqf+-vNSaB@PujCr4FC zY}}HiUYA4&Ks-g|$;!TCMO615v(S>{(I|g`b&lVNbQaMo zRch#)T-7jA4r+MLk0(>TV6R4359gc~FPuhkDq;w$E~0rC@Czy^z4Y|0G?#W2+b^@O zz_ZV9kr@nc#gGZu&i!1GO7pmfRGNqB_{*K8aExhj`_*H(d>igZ{|)#o+IO-=a(q~? zQ%T|id?%4k*c3e)^4}`xN~751>Xr8c-QE<10Y%{MBW!nVN5ojP>D*y@Qp5l)X{Jc% z9;>xUd#YjFPCD%n&(`cMBhcHPw37DcGfZCh=+6=7(U5DEJ!s55givg} zu9blYzez7ewN&(*8nOYe^o(o+S`y%qL ze1MNl8dKexpRq*#zZN~OYy0R6ffl60LC)(!1`=X)+m_e({Z>UQy1bIE$5r*tsh!95 z9W10*XaH>GKPY+UNrz?XfRv$u*G^U&upHwe zpw^YyN7A&4NJcleCQ5i%rpxiyTGh$yl!!)w!t6{2jIN*!Ud3AF zpA{~Wgaq#-vy-tFstFAVUmWhjwT7G-vA_P5ukKFGA{FUUqYWrkzphH7@Pf!b?oKJ{ zu{x@eP^K`QK{e?5{|XKaMNuQov#cM`;Apl7V@^<$X@y2O zNp?9GJ-0?)+$edh96InctShduQRvP3hkPA|(6EPpdPf@&nvo=#I^ocb>!|NZftwo9 z;*9q`qw|tcKiCnWxkG@hD=@YPYgt{kJKp~q#^S0O`h(IXDMjgNgJ|+hS>_!FOMvk1 zv=nR3xjQ}jnNkJtgYfOTbnPsyX-hkE*NYQ9%?o1b1H?KQcgASh>k|%#L)(dt8mPXn z!CUl&ngz!UTAmwRa7>+N9))DhSbKekk7kFIbS~mv1c`0_#vOmqA8*kEi;W`;vBw?@ ziJN}!a2OQ*KVC0A@s-_dk^B*Q zZIX1lUJg5ss3|QJr@`W)?^`RT9srnIW)99mWmSVzlrR%AM}O3a!2KHJsYA4p#-{k)$BY_DO980H{i|IcNI89bxJnzqx#95R8bL> znCX@yBHeHp*el|L?0ycKz?{39=eYsZ--ulM3y3s^vAb`|MM_f(q&hN@eW;n3kTOfX zh8KZReBL(09EwYdt<1s;;MSpe!37?xv5HPj9N~+J6#0CWKeuN~2F3@DfeBob(-U!k z*qHrda!fpqP}Y5W+1@SEww9^)D}WMAv}6xuYl?RJ56)=Dr?@LGVo*9u$oaq0 zkP;eZ`@%))H4tv*hMH&_Q|zrp+)%LxbKlpPQc=NVP6yKkGZEr}-#_hK1Eghu&V%U5 z$3TS!B0w+<^T`I-ddB1td_`lP(99nEd? 
z&tu6X`8uw}z7kNLlcDE7$R``$Kyl>!(c9JWFZwVb-$ob&BD|7zZ&lnoNB3$)^uQKB zJEv9-LvA~=&KyE^e9LzX%9*ovAc){?WJTYI#iMg%T}f9DH{6n2g_7vJi_qIeCQ~S3 zoibBNG2HBh@6}w;iSm^V^J3^suF@s%@?|#uP?IO9MGA0d8Es)Bg=b;Jsr!xLQ0N+& zq6GiYM7?@arEa(0Y~XGG3nE*CC7PL>(Mw)?j=+tU+LbH}U9v|=(1D%DjrU?T5?@-} zkuGCnz|1%|)Taolmy0glY8+uh=!x4e4x&g0!XOw1l9rnZc~N6Oxhj}DlZ6Ot&iGWD zEh{^j73iw_2nm>(fTP`y5?)qaa%)I5D1$_UEEr><24@R`|1@q5&XZ%1AaXteMD_IBoZBV>VXBfQtD^!&?&zKAZq$VTH-&C4E+Qh|rMDH+=T@yf-^lf9-1RpiI^t%KV*_dQ@uo)a5aZkq1scfil<2hngRO*vmGhD%Hy}ARpjuq2ZspB`C=xO}4s|idTB?$R#`=)b#4kB!F zG4=3RtPzT>h!jWA4BG3MG7WaSmaodql6pz37I#aoN_PY1cFIZ{y6$f~NE zkF(FoQy0H^^H~*lo0fs}-p3!FNxX@+3v4cs1q@t*RDn(p(HCuPQ?|DATv{~_1Xbc( ze}QVwr)aZb)>--!%2i8r(@}M;e||{%Ev&O9f5qvSacJ^9*OtTINh`@|olN8)>t&4@ zNFq;uffvvctYF?P*JTq8zi`<--a_I-3h&7_$hdbarJsKB?rMo>FeL)lMQobj4@#Pk zn8Jm-{DmXv4~l$?^SEj=t9Kr_P&OrL6`!tfPvh{{p1EF76_SO^=t2wS9X!Y+dFpK_4#8X|DL?qolrC8V~fn8^Y-2!ln42^ zl6LI9nhammkeOSF5~SG{w(HeQ_y)$s)He__e4-N)BkZbc*mWPoO7 zQZ`ueywUOQdt0POZ8IahOJj(1o0eQdff;Y$(oD4IbM8|ID3dlj-FwcZmgyrM~f z=-9u&W9Z3tkJ6FkMCaBnd~SjcQ6czRsZ6>R$OHIr~vT#fcM|XjDJR5Xf)m= zJj^|-4I1VKrr9i2ZhSEwwt;zFk*f);UU&Yw`WcuzwHx(rKgGUTXyD^e7$s{P9E{p&^}PqM#qTi@g~fwU6xlpwmS50|7gD5Gld;e!fBfZeN43aNZ7ELmqGP3V z-xQUD?nera6@94XJJxVrrd(+c2$9op#P-C-2`r8tz z7Oj8P;&ZNA+Sej@anaY)dV3Ocj|Csa!i?y+_gYW-P=vowAtTQI+l8ks@;?cq-?^go z-*%5#TU#U$FCbn+jmJS$Nai;BDKgLP17wG)Z0}+8%gV)L?Cz4oRi-ZjEn4sIB0}+Z z5n&UtW^)8n$%_UdJ$tiKKA&vOm|GxtWI;%JMPu> z*)@!kn-RO7^|g!!DxoOQ`)6t_3WcsAborNIE4ln#@*hm@LR-_7gH1PKquA*bfZw-vtyqzkDvK zObdMzFN{LcFMx33UuDW(-UzPeR6PGR$R$VCj#X8<#~N8$T1H;x~g z>@4Gpe!oSkd`fuH3Ef{1yFW<8d_?#3;E-u~c{9AGuVz${(rOJRNF89M>5^yxk(QQI zn?$xyD>FFrcrfXxZzInk)XLWQzA}8Fcv!b$u5X~`QvWi>o+@bvcd-#3sR00)%_T4e z0vf9S;?M)yxpqx~pYDC*-ZS5`>{gSp0+dk0jE8-;2aE90X)0R*6h3XTz6Vn}n6A)h zZY2t#s{UBd>~Kg*x=uSG-t*(2heNAWEsw2Y`TT{0DtYRhL?pV-o(eX4-of>Wz_}M3 zEz_@lfv=#v2avm#BInFOg6kRc-l`I};m<>`ni$7X4GQ&=%W!$7?fkFt#t_Fh_9#^L zwDUGnYJ>&^xEg>{1r^$UEWfX6oupm*(tG-8C}0rLFm(X|x0H^lj(efp+f3x{H=0zb z6w^#8S2NRC%vZ*ip;_ZBe^A_ZGf=yAwsVClg{Kldg0&`iC8I3^Ybra#kK}hv{gMz5 zi|`*YcH_iIt1f%tIa7AHX3E0ZKAiT+T4G~A6D$krmAo6!W|`NK8LkyUho6%BgxQ|~ zk7!U1H|sEqL@!SwExLPJ*qd=W+CUb!c@#Ae@dA|D5$RSdkSesq+erOaxC8?W zh>WHjVSI3CkK|IU{#_G@lgpuxP)8e(D3|X;w1Sr(d%4y>CvYMSlYS;$_x%>BK1HT- z#)*7P04M2onB>P{{Ws5$JSQ=!0s!XXG$E%VlJNO0;2K+K+4Hygl)Ijlnao$qn=dQdA zYn3m>wWZ0{?rF=>Hg4SypWg=<#N=VrKi%@g^{hCgaLi|QRIo|DPMx#1Fl)=COE?y_ z(QZyXzin%tc{=q2)OXeh0-RTv!eqM>beLpKDPUj~KR>k`!g8iO5B)8xUFxQO5(sl; z52wPKo5{%S`7NSlG5{_}4~#gTavtv)w!h-+NU6`fG8wuk)0bo-CwwiAFN1|3W$q<| zM=(xIPyE;sQJm;HQDN?;t#N8%+OMpSr(BzE=8mnt*J zq^uVfCao(-peP&UgA#J_r{C}=mB9gT#KAmxw7CRQva7CAU{#cg2cHYx&-AR7AkZ*S zT>=dJG>}j4rn|bBSSLIEadCQFW5OO08K%atknDpUw!WnFk63R^;1QwjiINHH^ER^k zy3Z)>4jg?e$0{{fkWvi;rwBj22E-D&KcUtGrAzhK{heP|Aoq*>3cX9~U8bo}@+v0$ zO`pvsPp@K|jU}<%HVmyBPkXgC$j#I97D72<)FsOToq^!3TY_Wx%OinV=lgkWwD`09 zfeiw-ZX6)@9>ABov zPYb?ZCE|bUg4IztrnWyHdbF4S0#50Uf+Ea}g1{b=yAM*$COPPhV})ih$a$4nD6g$f zf0C}DIWZ;wdgXJGA>ol*71Y8YLTq*hWl}eeCW1LB6Xt_wFf`kXXF;v)^j>Mf^;i!N zhGr!XV!c3Ciu+QvLALlNQfKH%V}=*K%;F9a91>IpV^cDdQWQT}ZJjJhr5xv-YJgU2 zt1;K8%JDdmk2l9Q%8^!8m>PLfg`SO1PYm_Q0>X#{%(P9-VNzfsu*g^*x*ezBcE<(@0em?-zN%i!B6)@+VQMnF%!IVzqjal@VnY5X%;)xjV| zSKeB53ibUIDXw^9*2_;o34~PEzO}$^9t>aHln{IYR(n?Oyg!tJ@Zue7dVdjKoO#j- zDC!49ZVv*i2ioHUSY0VTvq_{-d&JTfX3!_3tYhZ%6*iv%8&3<;B{PNTMI@aSLQi8! 
zD_<^ohlu<$>K)A}N;6LG(Pmfz$XgIIV597#J2iBLrFXwnEeR&X#F`&yEh@JV z`BfHuOdfnur*ZSMHJs%f~y_`OFT{}@56!1b0ugui);R;^B&MpHV43@Xe3sRF*2Sf?W ze}&0PN*%`Z9~r`{=W1&o>0uk)jy}(pw%mG&@7bzn4-PDjbpD{|U6yHdchik;b&7~O zcBy?WKV==H3t-izz&$n>^SN?419H~d-xN8&CUAGe+95UlksuxmuAR*Qt59(B5?yCd zn&b7XdAUo2-AE90QTvN7?N251ur^Yqf~k9lm3sC18eV#%A}&U)rjRsBEuRlh{0*xc zt)_)`!mOvZzDhEr9;*yXNw?M7&~_PFS0c~mUoX3U*p%MM1aZ#q*LiUj(6>*m7D*&3 zhP@dUI-M%7-VK<1nRZA`>Z}@N7}_IU|B)co`qD^yR76vSg55=Ter!;<)RDI^B{q(a z1S-AIzF{IFM#eH_&NQBqmx?jY`8qF}dk`(?5KJef@B*v#?6KiMC>3a&YuvzQh(}L} zFNP(mq9*c}Xa zDR9pu(CR{RKn1eol>l056nkP}!@bN2`0TGbQ)(pBfk5krox0$6UCWtqF|ew0$#a05 zl2>?}?iHBb6cnmeD+BglqU$c_F(^x$H+t-##p}*jj;ihvU`d`;@#sdP1;(KZ7Sn%n zI9GolyQsy5_~%{hQnsOTluuna;9g~w{5aaH@e5spKV~X@6X!3By9Flig#; z1ACU9naV%@_s4qT##M{IyhEe#^y(StYGXnb@f@pvA@3xy=MdnMKA54b85;HBQ6`VPfhCNMl*1*_bnb_FuxSHlx z&}s=zq|$Wy^4s02%4Yp%p7K;Hv7kW(ewvQ#FC>7R0ehQE372ddTzPw6e=3kTy0Bp@ zei{s%U11=z9oQuUe4?q_w^Gv8s8dNkQc&Sy@m~t6A|@ukP@Vk%Q=gf5?nnE=+UjEj z-=P$T`(HkqH03z-BkQ>A`~J${4Ep4po6%rQ`C#hC-plAQj9`kdUF}r!6h~gMx}=IYvZi z4upz6oi@Pa@@?N^7FcDCDBFQF`#iPmXsdpTz-ZMo_`e z->ym|auEUtER-@@Q%iT@-#f1uue}4C*N#P$^HvW6v}>5?Jhz*y>p9abB0cy1rDflh;U z>4Ag6bJes&SlhV6EiC&wh=51i3XnMSsTo}?8d(YIU@ZB?S%~bNa-l7fhfrdfkHKa= zJY80xmaQCT&O;AZqMcT=9_zz(F1DBk=)sN zNX>4*lUq*Yrs@cEF!`5ydNA?Z7~+|K<|FKK9M3Ajqxqek8CZv%_Ni3y|LhPLn5=z zZh}lb?^0nwB9ZBd9_!bhm0WM33jAPBDSBQr_D$eG&y?ziX!?fGsfd_2K{U$n7r&Gm zwh}t7hijF{t1x;UV~P|*b=U3w(U6*olV^x`Ka*t}8@bHBQ?6k*N5GywmHk)jsLS>J zeyrH~AIBrHL-j}MeW;#!yD66iTE#YSH2@`-gT{oyOD@VTe}DNghhPGpe>2nGLl;YdYoQoCv*o*$!p794En|87D z0caUAM#6J>_ZZ3hYPr}qzct)b--G7edOXa0EkR}y_V^GnZL&9*>sy<%d(3^iL1pMq zdvW_%avnW>@woLv_4CJZ5yKU+av_NmJ$;$+)@F{er3T#9?<3I{P{i|z+kY69e*=;z z_mCN$3JoZ>qYVpZ{Fy}T+e$)1_DGk?G2?dDr|lkukXsA?2J|7Gj@=HsL$LYFL61(Q znewG9kvl*qWn_YpM_eUVd;i&kdb7&?szqmy0&VkUo&4Q2hq~f7nNWcwz9$Nm0&Mfo z>0VFi#QFoROUD$@#wLGslf`{YeRE~f$P;TG#;PnCaaz07S-dk$z^S%r1xr~E@C3we zh6@_FC8B}AASmP1AW7=S&Z$v)Rh=^-kPP6^GyxY^0bq;lxy@+JsNc0eJA)x!wN_$^ zZ){kdQ-sx#-S%mc@h9Cn9BGC`QIfTi!5}Z=QFiMwDL{!(-*hDwG~}93#plNQ$Y?Tl zvE)O3H-xBmWtzn=B0OKMMtM!#bx9j%^}fu#!a4^B^G(s&3NrG6yBP)?lVTYe?+6cv z$99ueGMFc0Fc-q|H5=gg*_7^9^pVHsFv)>(ZyZcmbNEyCpUQE4jK~V7tz*M@$mF`K zddVmw-IcmAP)~1o%6gHS!PHCDC~gvf^ehqrg3SeL+gWUigbGsL#vq3>*$}!Jns#0( znVpW6p4jbRYBHl)xyJRSN02&IC|OQpjbUGwUbmQYop7qObHk=31kCvkzwGu9;=?c@-)Wjwa9S{%85&nb1 zVtCgE3S-a^z%)fhQx&HKMB|I>WLdEf#Tu8MzGGin#SmaJE*;Vk;cA}f1vp@-%U73H z3pG~NIQ0yO2W9`$!oE-rRI%_AK|kx+poTCSCBS<^bX@-zZM%-#8xpb;ZspU?UfvI& zsLm8QbP>8PM`I9|F7J~Hg;mBaXi?ddBI_`Ke^;!`wG^w`Um|C*pko(FPQkL6`-?iAJEE8*l=UGkHwQ}GApl=9~bZC zl?S8yPQS)r0)$p^vZTHOpolJL+m&HUg8dmuv$tT@RzzAF$KhJBwCuUAO)6g)zYr~` zIE^P3uBB!(cktVgbc-MX@&|bflBe%;l)gL7$zz6=d{E^%0#;AR#=Q>-%@5}d?fW*c zSip!$jebQ3O;;PmHedG7mPypgd>cTbCr@bsfvA_~W0-QratuJcaf(eKg{85OdQBMc zDv?(sihGzP@>L35G?h<6e9{flt~Hvn$1_pLdP$B)p4g+cmat)N659=yp(g8@kkv_p zTE~~*)dSxWoWnlozhL_Q^~HxUx0O*VA<+WTDd?AxGbJ?zW+gP!N%gKUoYg>Xtc9H9 z7@9aQK%tJ@YT1Go7H17i`h@3|0(Bgdp3ryZPW;m3Uyqt?v%C^GOXt}VeM1Lytt|bZ zzJmb$a0X`mK6XFBe)$V0Om|EDP5fpFCOVq%uZISQ76us_SB)bDs4N~M@n<}0TenI9 zk!dZ_+@s|}bEzKVm|;dyG=(XvQJMl7kZv_UCg4aANG@{fwbJXnaynO!JSS*rsVCkE z)XPgkYc*qz*t0(WD^V~o&DK&XS#zkwt5G@c-5BvY1>^;Mp_}-{pR8T-f zL3)!CN57FPhvw#o_%1k~aIOl6DmuBlAAPf!DQX))K#w=Ym1|WG zbwyMF1SAS>ZtV0}J5$9YJ~3oGjWLwuiLUtC@X^!nN9EK*#wa&kiKWJzP(qK%zj&<&cU-y+^rnrD%S)ZA=wL(HLwn1T{Nt zxg63ejT0ADVkmQkQ**>mAd!y=Zv34~5f27L@WXJvL#ViD4;g->f3Tg`Q=(5hn#1y5 zR6eOe@3-3V0Rs!&lvX>FupVOK{1p-}=SD#t{N;isvxcTH0lgGzD-&Vg7?{4SH8&jC zm{z_8qKV8kW=E>>pp#X%(0Dun0Sa|tYGRY=?|@c^I3&;`^RO8~hHEZ9&O*>S84<`I z@S2|{LfgRXh7&Wf0F4wC`?#Mbc<9n6q3y%eA;vGqMS?;*`#{0bDQ(1?*0 zMhuZ@P*SlUBsLKbU)t4G~@jic|ZUu 
z&i2Jg8pWGvPechE&a1{F`2C&MBgKv9)pthK@n~wQ6tDa|l%(H-e*NH0d(xk2?)3NV z%42Hp&O3kGlHa!qAGCI(r_3!h$s7Le(x`9L7MQ5u$uq<#Cqs|c;le$);Akm!oZXuI zvhwMS;TI&aW%N*_PJ5Mod_o&e_cF9$%{*-gn&{i~{R5XDRezset**R4jIZ8E+eCRx zSCrpW&Uo-*GOEDuP4^R|8QkE3Tj=S!R$80(`0*q3D!02$7|g~Sqv~yo5x0BV`(*8< zC8jyk*Xb#aZj^@$X6K7hK@hON6BKG$hNXuqiIxMA`n^ zn!z7XndM`VDp>6%0442fOT`4)YXn3Wrrh`;L8@+fx(DzrpGUqWU;rg~3)8*}kXFA& zNw43SPO(~L18s@-y^s3iDlVfpL8#uX-j8AT)ARsQlvX((r4oaZHq=XF=Lk|v`h(Sc#>9-wht14n%oO6 zNfAG0+44`QD@nbar__ir8Q{oIF6Af%ljXCl7CUc33jCk>*{Fl@95^F~WTZdjhw zSdz!M6c-O?rbO>X8Ai1%p)6b^DX?C5lnR>y{nkh3M8vP`rO~y9w4#1qx;Xfl`6479 z9qUQxgdSId8;OEq==jnS?0CMIv?#Ut{6s4F7C%kBE)zIm@b&*otUR{YxgCbv9mBTU^UAsbT&E_~sSH0my8v>T)wvV}4xfI1n1jd*$-nbKPS%~OLOA}E))Vs7F;hRgVqZ1m&mu4E^z&mSWm(iLqYTahavw|}^h@rs zch^7sdU~rnnFFh-D)70E-=k8g__7UAAP&9lBix#Jw|~@B^2VY0xSjLEUf)MwQ{g!e zx)n1xGaWbID-Nx&WjdbO)~2uaO-j=d&z$_|Xnt?)c-pPeRIoA(> zc|{SLk0{8-xK?M-YAFy|f~e~+gE%WMyPr>6ztD(Q%~MVgcqojSKQA`U$~+PH#5#~hgW5^7wVuxt(a(&AF|#>?ws|c(h#tzwryB432b`N^O0#9`QEW29j9>rNgE8V;$i`|k z7Kc1+;P>hOZ$&r?MA;&Dd)$>&QV03DX*HaUulC?Cg>lJdCSvY|C5O@Pyk;)z$S5F9? zwr@*G6nBm@4e&7gU&$j~LdP(AebuajFvTu(zGJD&WYSJZms9@I){J;YR{%P%$n zfW3!`fQUh}=e3wewQ%LiMiF%_%;X(;>OSaOcXG5{%&a|zRseRFfrvoemP>HD;&efm zg#s=sXkRuiM-St)Jg3Z+Jo}cj7E3c5|Ay+{cVacEDHQw+Ba44UgI!d{8hh=E)Vw+6 zx7p(WQ$2W%t~+M3o_9ToW^py#eP%}s?YN%Fsx;3_YL@fEl){dZ7geyvjK`4A?;QGT zsEbzpBJs9>^{HCFD)I%9nqy+qqomDfei<9lzt z%;<5KYBqUuvceyAJZ+rs#1X@8Y?q+Kx%%RTHMP-$oB<{Uq;JEcp9;lW47W7veRiAa=%zW~oHPzo3Ee})mx5LRIqRgz8Uq&QRKj46U1CzVSw zKv;^qr5z-?G?XiTG&Y2WKWjMS?D4r!%!Pn3ue7+(g@wxKtZ7}npWj@C9^Qz>Gfxcm z)N=oLbl^6{{Al7kL*=P=lnRh^X%*J7>2(cVGs!Cn#smH%P;>s|^S55$!B*z*WA6@hSBH(vcuPZ-MDNKe`kSA`gPr6;%075vm%FfDL zqXAtGMi1{-O!~OjS;63?S$;47nWmOymmhpGDM$+SiW33>bW8fM6Q-v<<96%@yJRW- z&XG>}1WuUyMF<+rk$fk8PXbU${YtCf%6YA|0dvSWq(O=zpqSmtfNn+?lZ>#nK`n`e z1PH1*nI3xR1pbZ_0-6`vVSax^_6f}Xu6AfB``W1_i_4s=q}H~O@`SE3Ll1j0j^8j=Z%oM)Nh=;rOd{}-$zPBU zVJyxY)n+9d4bT%=st9+SlX>V1UNREU2f-{ZIxwW5#_rC>V#=wUQRjNMF@zNxl$XvH zmC|>}43y5E!KWEzoN;CvLHN*i#r}44mUh+yE+$!FkuSJbW0!-3BY&hR-2Qv7wfrB@ax7+y3HR6(8NFqdf|;PSx>wk4j_&6DWV&knUzP?(vPi}Qjhh?8{dWfN5HtU zAG+|l^FM+J_p7se*dC zG_u?9hdwQLe&utV2)ne5W0Jh{%9xPNgSfbXn9A>iaY?m4F~jek{Hsa>`L!Zx&g5bD zey5D}r|l?Qm#fRtl)icdp_eZ2>Z<$%)U%hv@^LSMOPPP~@zl@)_fBSJnx~V+>oJ(l zw)@ZRr4UsPS~p&o)7{D|2OuPOSz#5b^I~V;qa$L~tbhqJq@*8}*y!vEz^wqd=a`!98T z2j@HMskx)Af-^3m2}1+b(MR;BPFw6T7Q-mTV$r0!cwc)i)=ywt!0$0PoNTL+v926w zPI7aDs_LG-WXnPNVXL9U?tcV*OX1JYtP}#iE(X^h{znir`PaPZr2@aMR&p2FGi~9b zp2aviPd%bIU1szHdSdpA>l0h6FTa*m!iiDaJL|Le=XAfrX&5N@#>FjL^7X;j9*Soz zZg>8YG^MSd{@&knz1$0!ON(3y;ynobaerAMU`42{p$+&e@O>0G|7ZDo<4IG%2OwtG z;!WkJ1&(bm=lFsP?>}l`3Y%Ctvo?;C2wf|HIimpPoH)rb6`C@Ix@i25;2c=A8`>TZ z4k3)4Ds8SDRWO_KIh=T@%hk&LWE&n>AyTyY&XO8f#WRbtBq+F+^EEDAt>d>2OasOP zR<=SiPL@OBPkMu=9$x$0L`One?yNwGVA~6v6JLrZPqNI3B+DckZU0tJMaghaeg8jX z8lAW;v-gioZy$(rml>Y89|!CjS*l(5EdPzV=zez`!Orl)wCvm;Jd>&Un#zs6zar$e zk--M_?%xHs>Mep{P&u&lA&oO9xJ7qt)4;Hs7zf)f&DKc5wrm5BQX4RXMev&1;~jU` zzU~?RuEB?Vqi6v(yxH@O5|%3wO_IcwZBU~niK_&TeZviMSbgJC_jUx5dD_vdM%KkC_&SJSfyA2!mp%>!=+NBu^s_!+iDn}$23 z_pwWKiFSXKmQt|A)=&N!f^I@%RoW-Wfu3|+S0bu8ZwbD70^pY$ap+#dX&EFtW(UIe ze(M4rg!|VXATi3$i=vM@LMD8xw^4sw?}iA*t{%_+Mb9KUmQX4Po16HpQr zo*-j>Zd>#foUu@tAZ==o5hA#uFJ|!7fEzHteeN7!whR&-jl$V^3jxQL8Q{H5MhauR zOP47aa)n>AnPBfr{dzM4l<5Sqe_Go2Fu-9}XAI<53XQwlh*TbiI-8iEJD=w=<7!X7x_knC%<1RQlGe6Jk+AbIOFOnH+T#kA>Cs`8M$xtUQ!J+ zT7QY~>EW#ADZ-U-ClnXZTF9O5xTa<{D z2ElM9Yvgny>TwOiVrnl>rUF@V(VM3IX{dJa_CVbee}j61s#(I`Bop8KBznfv(%?j- zBlwg4`e!wrvKsTc>ikMksdH)ywQSQ(zH&bHmLR0Sa&P_{s-cJ0UtVoL|40UhaUgss zCY~v^kM)7bYn0!G8>lhl_%O!LKKJKo>I)vMsjEUSvY`;LS=$urjFsocSn`HJf@!rN 
zBC5lQWtH|Zn7C|NRZ+)ScP#zC-Bck0Ges?tZ{b7C_JQ|w62(SZUC2b#Yht+EmtS+_ zn9&W|YF_#*cHwOeBdaH5k(#38>p+vo_x|}F4f(yv){?e%fVT#0v13|JLM}GrKGI1) z3t$Qy(fY@UzlLBarX-QbkT-utD(=^5*dgGB!N_a*2mHI;UITmXQ) zA7p%wr{V9qZ7^&7nuyfOgQLh)dZl#u>1W0PKDURTsS-Hvi$A$*sq$L3}ffb~s$s=&gd1lYRh0?!kXcrY;h}ZOIUp{4%%fFtf_NC}FkR>uo73#d8UEhYr3*Lf9jq)~!c2aw&95z1!Y?CSTog~q z+;vZwUK-vIdUs##ciU!W`Q#nPjE#A}PzOc2Vx9H9CT`0&@UzTt_vA@q*Kj%h`)W}g zzt_W*;@Tx%q;%6x6^>u+C93vWh40zf;n9VWZ)IRyIyt#n*l$c`bi)~wzk+P?$-e=u zB*oY=mDzfU8%TouQ(OY<)D}@ELiA?cK1C%Mx}$S57X-~dpdKu6(IT!I22qshX&rXw zu-jQ`fICk8LY8jIX-%hCVTU|6k4+t4<+XcTthCkF`hG0EP>usYJ?*mO9d(3 zW4auc^GH%2;J+r+L+sZgMNu{)y0Zj}3o*qe%pFIQEwU3K+apz_D&x`q@D+ic*6lG= z$g#2eyj|oT3&VlyOa9NE^M{RcXWc(4wTdr?+*LoQ$z}iwvtI}H31>d{7-w+jZxOSr ziZ!{Q|CI+!HY55H$Qy_od1kr~`RXYL7<S1Qdvtb50x}?_P6I97$hUlY#VXv?EpII9uP;f74L$)DO=`RBj4$D^Z{!=) zD|GqMHh(O?=532S2dF!)1cz^a+tO+)8cIb<0qb)8;$>5*4e&Z(qyaNsAt>FzIFW!T{>?Us;7L=>Hg?AwN9wNDr+z7op z?i0E&I9!)$tWV6-U}C(BthbLtWK$r*037L097yth22 z_*LY1)!)8VlDB>wzZmE8JW5FL;NR@L2I0h6gB6LQ{WYd)Eh`?DL%YGcTrQ1pK6oOl z@k%_Fg5r7fdr!UF_zRuLuw3avjN=+mAd(~*qsqI|puX^Pt4y{-ic@q{I+Uy;#TEKq z%<=9@U!ZuoyIOGNHMcA&H)3Xc3gVG5B?metq>pub#CD6A$bN0RU9|U0M#A(4r?Lw$ zBT&Woo81^9-@8;~79YDbS@&HnG}Rs7j^$}>%KsW880o&d(mS6yxl?uI{0$YdZB&w# zE73<;n6nY5@bO%#`K)9uV6nN-+s|n|Qxff;Z9HQeV{&hzo$>X6xj7|GX11}lGWrQ9 z;Yb~ir>4sOk09D>JK4!Lf%UNyk+_RE0^PU|2t$dEGyA2p0`3_tIKM&>1%v}PHA^lo zk1FADMSpd-r@Jr_*3GN06mPEe%BeVPATP-Dv^g?VVBl;V zLF?-4j%9~*`Gi4w8kpW)5MWTnhlu#!_vsp3UM|8pW^g9wtHL-`Rhz4T7BQ7D3vJof z@uSZSDujJ}aibb|K=wH>c*JlJphph|dWsk@tJt2mdW3B;fN!WZaMJ)|eLoRAnX7~t z&k)$SH%955aBDf;;6rQhWm;l~h8!}!iZ0Tc8VamTna_?xzn1ZK%79n{ zYXFeL?z_c~MfwSMGvbOct zQ741JD56WO%o2fY=e09iNDl>IJVfZ%VQZH|WI%?=^|!v60yczZ4jllqynP#CJTa_M z#sH*m!1enH+V%52E8q#fWYTk%5e8?b#R;T-T3gosAhenjKjW+OxG2QXjJ>uFDobC^n}3&|gW7&rx65 zhYcILL0%%G6WN@r5e@p^@w4p(A;s-r+b1u4A@4(6^RhL}OL4y0u%}*3mHX82?r000 z+0!?7MTPjo21L3f2_gCg%25P|`xAW+pMDWmy(LcM75BDFp8EvjKOMWHj?CmvP>Vzi z`OC!E1UJ^uCek>rTH9E;|JpVYtItTMr1iH>kiw8yC-s|hJ2==O%oWQb6|H|m_FX5Q ztDJ%$LyY-}WTluRAIk1aH#>p!S~W(>*z;u0k`L>}0kw4NsT-spmdQ8v8|m12cU^}f zkAFodBVe2a8C;Qtg$Q)EDmAr8ev)+d`Kf?8G zF`xg!xzb+zN02;F$Jyi7n9!I=sz9>h`RS|8_j059l|5@^-?`yzhW`jAfdzB1H&ugi zC7R3-{{4{kM%-1|9%Ey0(88$v5dOto5xVs-$0J%pVV&f@hJ^uE^SiwKd-*uh?MA>I{%LH1Bk3eldE@JCU|;*R*Sull6`{sD|^hy~>#%HM+|mPr`|AqyGQV5$gVPW@ zl(t~Nb?4z@L$=A(;xb=j^ID`ivXAb*`>R^vH3H_fcU*ZA0b7_q51#tx!<9z{;74yp zSdV?hz9drxGXpJC9_1FIOzf)uLKBwmG6&6#$bSX|h1}CV$8O4(Ams?s#*GH-q-apya&?5!_m7KWe?ZQj;LNWr`-r1b zXkZ{UTI12wz`TLPLt-a99&XkDrb-E9m(u0VSaqhl3%(h+3#ywC`=ki%3W&~A{4w;{ z(~Wg~ujHNUvwQp&ioC!3S_lOhMf@tL69h=Jzx|H`;!7Hdk14j;;^~KzZ}c@yd&&SZ z(%`=DSdqh2ZEZf0U4ZNH=~Wr_a1sXn7+qW_hTEWdCiJCNpV%p@+(05c3GUvQ(z-Mt z2BWrYO!)2!{Cd!;rGMVe=kDG+Y4?=#4!|0L8l=CYPabr~15(Y--dA79)l3wKAI7eH znrr;as-i~jZ}+Y>L+JS9RL|A1*~LW|mIa){`UVj=X?;xvp62Z5y8gB=1kw4!_TZW0b=o@fK)vehel z@E=gO0BfP)?AcZM_oAC;Kmc3FwIH)=49M*7)ZW(j@!+!v}-kpZGp z&(6$70n@|wXAA1>Z-M1Kf3twltKg7#mV|jH?MYiJ))(C^rLXgVm6#0RqrhCbkar8f z)>rpV;uSWw`quEDT7tBhYFl6sOJa~}oGf!~Up1RCWmBCv+SGlfK+fj(AV^ z8=g8hcc_OQ5YbMDB?FNGFFem$;jN{RrgM6em&%oHCzF?41qu7cH|LRuU~`ahFUvu_ zWBZYg3o*F0!j~3!r%y5X=(wVxb?#l@5b5CSlm}yQz)_JBAmXX!`|ZxM@rY&g`1D*`3nlMnNH_B-YK7j&w{vOIS;@T6^v`7wo*9fuh19}*u6 zV-Q?)7W7IfuRW!vMdr;rMYP#R+zzjinZPezz*2GBCP?KHbTk2Ba4=2k&IXJnsno(8 z>a~1{ZV(yCbc4o=?cfwd=fL18uk%dP-+HN1fr++|bn18S>{0fd0#)%~tI@u81uvG>^GqO+iSYz{YqaS@= zm1EQtRyn`DGun5l(=+f_gQF~tMpD-O7uHsod#8sI!^xw=GE zHL4->E}5^o5SU!F-_}9yk|<1!yhbrClX%2t=n2pol7pI{!5PrJ=7%~pjTrWj>r2XfX3`G9i4K!AJQJ>EYu;#>CxC_9 zRaqt$qwCpo7Zz}Zz<)K+;$fAAKQ0czInDu+U$st&IYMFZE>POB=pFg z_w~D8gV_!+pjgs!rq27g6>hH<*GYBrN8Q5OWm>+tYf@8O-R0;f<}6|x$1bc}bPLOIrRKYR 
zaV&z%@Nfpa#Tf}5eml)Wlqj(ey_)6Z^B8Owvn~V#f~vZq*TAQZVNg2sf57GTHE^je z(LfuEswLsfbCaUeZil)fgbE{dj5wxsqx_HVvgNsJadax>8SM zitFu69`;*b5~%|#zKQjmgp5F(>0EIy;!sZEC8HF{H!F#!3zvyQA~`Lo4(Dw z9cYzRFE(iA>d*efwI8p>2ps%XG{M^YdkAkVGtW06QTKnUGM#oqFtJ{zU0- z+qisW6pAt4*6%mB4yXmG|EVJKtUUDFvsS#y7% zO`k{pc)!h}2>TbwdH(sypsx8^YY(ROM_(U$M_6d-8;o6hVBt_q_W;OI4Lv{A%A)Lq zx0cq{zV^)C@x_?H;Yc`4QsDvNG)l%OFX@^lNNo3uBnW+dQ0X{1A^yvkWylGZnk1SZ=_mxUUDq zS|^>V5BoRSN@vmA=EhXG1Zs(O{yAW0^>`|>gVviydDSXGMG zBMKwvVa_>|3cG!~^V)skNoQ`XC8>W=%xb18mfkjoUe_+7Yp*DocGS?|>k-00n-E4t znU4|egFs>d8Qa>?t%?Qu6Ycvgu1^p&HW#lpe11|}`BaW&HKas%uCPEcW(jPZwXM_YgWCmh!rE#z^zbGFgA54lKKrqhahkGSJ{ z3_Y`n`qL*@FC-ZXfvMS-UtEME9wjwRFT}?7qZ_;aiQLY6^tE*j>e^-1ZrO3&`WoaP zu`jP3KxDLl@_SYg6|EWx10^W#18>yda~E1n|3v(rrO(n-(u98Nkc~t;u7cS`lKK&6 zEKeCjfmdpu+CI209|edCE36F`gg}<4YXk#@Z#AzesHv!AiMh%09SkEy?*Q-AVJQN9 z{)|FLUcX2ynkyy!)({KZGaT5|KFM+&`!9H5e*_4Pz~v;;<{IiuI8dcRV!+Un z*+S*k0g zr9DK&l~(o{OshgW+rP|ZrHMFnBZ$$Jn|*etI{g~TYMD6#9C zbthX<41vw!r-8+wqSEg^WD2ul=P7WVR{kd+ts67nx%A!|;(qwd{kfRz&w{RiCwKln zuMnE}?MnSLtw|s3p9QYvy;(-QPssB0FV_OcN&QQ6{W^D#J_CLOLa86%qVh;q^RP=t z@^4bEnl7Ks;LKbWkC?T5D24zxrr^2C^O06-RmNDm=OlK{75Den0IAoIDG22gx@er2 z01ET76{U@snVEqTSOPU- z-v_l^gEfzNxideg|JuG=m)yey?}UobYpM?J?NL+lkg%ojw6S!Y;YxFc?G?*LMAQ$} zGX%NeAh(87{mobrRhY`zJ)L@=hlVi>H>l!49%`gdfZavnGZv;Haf+olE(O6>qr?lq zeEej>n%zKOeE?_(zZSccuP>;}l&tRg7=*n^#^xX!k_dc3gp~S_W_mu3B$jxmW`M5* zQqy1~p-NZ9ig~ZD!c(D90y&++d8PAOheIRx8yT=s{N*zTiz)dK_8b@wjo*)d+N*5U z71Tz8x-T85SjauTIc`HPrbF^v_zI;{0e-otp;JkK47M4u`X6GbkVxB1u_XSFpkUmG zq~~%l%ktJg$<@DLu4Pg|lf*;ApTl^^;4ZL%nI6<84O~agkPF zNXgGx@cU#0yqs0b7Dj!^Nop?SoYo;*J+=GN`|n8kns~8aJMWh|mH0q$c;Vq`iz-p- z@GJ6BEBuRY5CVutzKeVCg+Yt9=^fJ(h#(Mfz1_ZQcDpEc-XfWpme2B(X0I{C;*H&a zm88+7d`UXrNDYjzFR8hGupi?h>7FD&gvFg1l$5uCe8k7+!uVZ^)(d830-oVq2Sh})1*=YpXTm(HR`UVn;_foCXjHsxz^LWyS8MZ{Fg5m z6qX$IUOxEdPG>rK_~Nh`TfgrkR~|BTroAh(XkZFRc^s-FVh{IDRNr+tzay`4drBB- zq3tq5S=gK|&Ek=O!_(Eud#$EqKfiXK!Z@{k)K%GM_mVQv_+FRo>~)sc&t3pwQKc7KjMCXM z_=~6ePw%?uPjnL7A7+JF%-6Opq;LIG(o_KUZ9qxWoUv-c->d%6;|ZIwt=oi7B+Sn1 zDPAEz`+MFS7NzWVw~EVn#dD1UOR$Z+Rt{GGars7r)H}8b(cOVsO#?Vx;7x=knH%FZ zJzz2yVHwTR4$jZa&V8${mR<>UEEvvA>piYo$9rn=H^FrwwtW3u1vq+lSCu>g8$NuO zCz9ibeEQWgo=(j(dO4D=8_eWUTy+HUcz;f<3e-OJDtBf@wSetIh9KzD@vD1ml*dl7&jmxJ?>=jTQ$VYPYQWGy%$HRMdxYkGK+cLTFwCK7S!=rL?k8Eh#i*XAG%CTATbf zuKG17B0nf=CJwdDGz>S>w;4{M>$1?=WW?^c=Eev)Jf4<){k!K6{Yj77iQmoAZ5H(Q z0sN$?xpC_1N72>+)8Dp{s}SIKvv9)4P4My?@VohD>L-w`oegAb1CpjMSC+TkEc4DJ zu75lOF66UI^XbiR4LvaU^yzPq54BK#RImP(_l_lLy2rd`0?N~nEWUsHPhNIWqV2fW z1LBnr)=9Yuz1HumLDlt@i2@}QdERn^3Bq;?6t{cKT3U(c_U3y(a`gz4GshmM`u-?R zQX$_8XR(&(Th-D7;l>@F6nuq@VX8%jYt^9;g_$yf&k#k|1_!V3Ielejd4?~|<61o#) z`|vZ%$_uYV5PA59!j#-Q0L$c>nn#G}*Cpw4$Zzq*%WB@d@w9{i{3j=EvAINVtY$n? 
zY}5efBp@-8Z-P@j9~u8$5D0`p441ICOcFGC1nC>?K`4GxAYLz4CP!6&pxaPDI?SVT zAF5G~-KIJmt^F)on*p~40=rTP_@ZNfWXNsfaiUt=dURuL61qIB-SO=GHNxk}k=D_y zj+5c0F}1H118LM_#fODJZ6HM>M<7F4cj3-hG1 z%JE?K$%Hm7>9!ty$y#AQ3=v;1P|H{G5@x|T2%fu+1qpU}nPB3%a$cd}xDlE!K=85BsswFbKpMoyW^@c!QDcK??FXEgsN&iYSO@9Lw%mBL7K zwVp=C#w~-r$fh3TsI6sM-jS?7n_GZ`X4+S)0L6}!cMYL`Jj9Mk)h|`T1jVtdnx`DC zVp}AMHur4JP?&6pV~yNyxCY*Q+9J{Mw=UvUS+Yhf$hD}=f{xlwUQeZd4Hx?O)%oj2 zm3~G@NsmpGw6uf6n%EMi@|CCo?9leHa7W4=fgEt68=ij zNbJd6A>Vo6KUV!Er!&Z>Xs|houAw(+xlFyW-T~$myg3nbjy2k)O6{VEN7r{dKmT|S z{V+NwTc(=7L=)EK{hIW#u*ZTZA<~PIH1S!PwsNnM3#w7MlTjxwMqocIlNjuaZO z$kITBC3dh>qkL7o^tq($^Llpm)5V=01L9}=Y$y-o+CxbEFo_nNZYF8L%M1u)BOZ8K zM@?Y}LRh=ZK*0I1r-q`c)BujcOj+N)Ib+|Z)d!_aK-E83a`SnvsqCs0QR}bflX$UI zyVdCO$VPykExbCBxyytsyoClDqB8;+a@)y@Q42`OhDntpqe+IKKxcQ9tODxqxC_Fdc4nc%GmzTdH?S0rN z+8>AMt?sR4ib%#LN3$jh#M3g2d}d3Oc`bIZv4gYk7Vkp5HQagJ!W^Kr9yJ)Qd3!~6VT$K%X|Ys&QnEJ#HI{-0 z2B58F>}ouv*i%3@K(4eZv&tI%L$o(8MZLN=0T`*FD(k#YL_q|X6^nRDc~Mbn!M^=$ z@`PC%tWW(Z`M&~$Q^Xry0^+=I?c1nfU-P^m^INgjX|#`nhnhvVGFh5Y?;-v)^_+_J z{AQ3afo*NEa=Nz0n~STs^qXb(I`5>jCL9Ds&HpOCXCbm4puv6qn`i#9qs*IvItv4uV7) zj4_Rk`o>}>BKg8MyZc3xB7m+WvVD6KhYJN6T#px#oFujPA=v@NikR-z+&7fx6l|7<}4~-$2 z<9$A7iB*^z|Msx=6r!s@l8L?;1alW*1hqaXTfCt%5>vgLixgZgN+BC%yJ^UL8-D zlvVb4sD9JnstvKKCTCogv9gMH62x{u4nBph)4XtQ1Bli2IE1H&m}=DF{VfZx z?3gA`JLErH$Ur*D1V$`#owjnFn7WbgswwfeLEO%dnCmx@kNitY)+h%j`?WJOz8gB< zcmbwdTiBVhZvtI5ZLt>k+pJxR+Jlg;zxar1%)~2ghApv@a}zQK!=xJBsxSwCLrqzW z!{@ZML2h5CxRI2!NoB(fRv#ch_ZLjNfBTV-D?%*AUR}96ABp@KV z9{rwpB^fzGBgH7qAI*mrfOVs`@ag$Qg_*hB=v;r!xUyspZuI=*Z}6JxhjimU15G)o zaK07@;A=KWJqp;lu4kBkU!6SPjemWsffZYmGt=)35!zaAgczOUG2LnOW2Rg+`mdTcFZ2Z5?8~xkG1-}@ zXiMn=Ck{It72pe}rEShub~H5TKSJma>5_SThV*ejYjOb(>3QGU`;^HQobgIq+*@&5 zYJrl2rUb6eppB}dZD0h=Be`NcD4TyB5;8-+=o5LyG6fekNZQ@pIQ`#WT5uP`^2jb* z<3sh=wFKPlrXDjYN1uqAaym?J4)%P2#9~5N#Z3&ye`Qdxg#xQ&rN%>tZ)~~F)Nl}?j=jn^LOZ^R@PNUp6`tx?jGl)MO^5|>t;_nm( z;H5$xthd4-*f_sSNKHY+?3gD**|H%{<`sNqd1*1tuB?~uXOj?7nZL1BX-;%y=qvi? 
zVU*3#MBztdT03{*ImHz2d}vg^aooCkDHB?N8MLe58kFkh9tcw-Z&n9&7WtL1Wl=X>% z2;`+{xSOxsMh@Y2nh(cibjc?4t1wd7EF{K`=k&`=MCbnR=E^-ELP|F^_c?o1MiH_4 zIwl{_NKdUDMm5e`Ax2pcRz;-!ium`Hb(D8kf zMq4x-=L+O^8}r(%-u;gtX-wzWPv;dp?wYvuaWDi-bLj1mlf+V+SxjdmALdgs5N}zR zD2Wn}DvMK(ed(kN>qUB+&8o-&PEo~tfwkHpW+>!qkouXOcK3}8n_j%EiEi$%sC{i9 z3$9E2K{7b|oz#lsT7v%%EH{kV#KP7VOBYsEC;4vPEKj~9=Zr$gZ!I1zBXdkvCLMO) zgW>0g%`?>4kxz-lR#{w*;%BMZv#vYzFcIZq5#_A|ZjI%{_;)wtrRK7Y_8QYH>B~+~ z_rLhvEof7H>G2P%EOdqIbp;`YMW3;3?o;r31-Y)>6%6jl{gt?H0grg&nJ8X>xg$k1 zm+4dhORfx=<0v^Mu2mD|lw@W`)T=zKeLJLUK_xAFJBUI(S}4)e+}E-_7XodTmq{-| zqcSLYZ2z#=c>BGsqAW;0l+L4k;~65PZiscx#p5bn{2lauog92(Q6zigkz=gn;yN68 z38-PjG_F{%6uQ(ZS|wDJEL=5yYvHmtASRCt>c# z8nb!dEbUL!C*)$Sz=wk~{Y{Wux`ta|^Ck2;t7j}Jrg&G~6tRf6JQdw5Q;vWQ~d z-owm=?F8X{!MGCM91U)#@<+oqEs=RNEKV?S;uR$><9Tu z&oEdD#BqmH3)K&57M>S-kD@mNy}e2KQ`ER+COJAB^`rhD%HA`si7sp(q$*92-Vu_3 z5CYOWNKfc3q4(Yqkd7iK2%#oGC<00kp@a0MNUzenB27VxpkUV>eBS@N``b^uUm<7a zoRi7S+}C}Tq+AE~n~yl%l?VUp5QGgG-;)E>g}03^C4IZii2+*U|MY@iEpE@% z(|x(UDD{`9fbZT&-C6%>OQW2D5yXtpR*=jKZ^|LVw@uv>Kl)Lr#w0dWY$~^ufdYcW zuXca__%N&5$Us@W^nNUs-*qy2w`XabkkBvk!)t%r<4oWXk7w^jvK~n%R5~{d;XmtN zu)5Q0vwyPRx)|D_{T=-lcs49=g-^Ul)KpwLu)h{# z8*LV}SPfu8T#*MzKGIi(G@0h+L9!p%`Ly+i3)(urhZrww-mJh^YR7>5bm_a`{Kf)C% zZagR^y??U`t6S18!Nj<3is1gJwDP=f0l z&pyvH?$Cem=2+KQgTKnyD6bY)EA89WkTkKwEBf@r%9vvz*Kw{qbIWNVzD%BPJ2us% zfuA1wSyG{c)NuJWp(R<-_Fl(M6H)l<-B|esq6ifk<+0Ky;zZO3PXr@X3a68}e+DrcFUMjemb|q4IFvYP-XAfw2`#J$g6MC zfCE8b_k7PseA08`P6QoO<;PD>+kt4APAX9TK`_vx$elUz;6ACVu za5y{S*m%F;rKf9`L@Q)p-T%lHE>8F8Chigef|4!oU0-gqQ((nf$Pm%-{uTgMn5UF* zdG;Ov+e%4gnt96SS=JmMAYJR;*cJ+Kf6T~5v^1^Kxy?N>rOuBVE$n!*q6^{ zuuBP`D@x*q^sUU`%fsT}XLJu}FWirjHqAV$Krq^L=e)rq{Md{E`+ zd!uiloj;Hf6zn?9dKNT%Z&t>}*(65ntt7Hn(87>lWdlExl-mO0wf{hz&i_E1Z(R?M zy`P4LrB#;+;dK5RO2)SXmbY@bVr8|PDa4&yq5zkiH(y{IC@2^ZhE^ihYv)3k-Ch=w z#$#Uo+P`!|>-b+cw9y}JIltdOtbSqk+i52MQ+Z77_&)&sl;?_(UCqI*+1vkjrkdwH zchRo99t%>p(O^pBi+4r0U+05Y?$Am1N&9=N&Fom~=f_U>w{T5xos5;tl&pS!a+Y}( zY&Mpu#oJSd)wW`@wLXaYk!9N`LqvO~b4;q4AyA2v-{LpOr3AaHBhNs;((mKYRJl$z ziAA9iSa<*=4M+M74d3VdnS3{Wap8Bg=A~7AQOfyp$R?FTje=Qq(ha-CkZ_f!iy=L? 
z&ZoL%FLSgUxHY~!u6|H){t#gC01~%I@Y4Q|Uwj(BJq|YaRq~Cd7lKF4+ANy`AeU^N zT^iq-*yYU#t_b?EJxZJxLy7^A8Y+d7)M}NuLn_{b+aU?jW3Neg%OwM>U^_Jk`wxU}DP20Fwhbd+qB=GNi+ z$%B%(brbN|SwC*4n&W1X>(|bzqEi1xeo08Cc!c?>0J-E%(vh|9h!7(Q1IXPZvBC*p z781|2$X|>+xd?E(omi0`A85?>;>(H-6w_L>h031pue;2s`7;Q-e`MRAjgE9@BIfi} zKO2vOH@ks6Qp@!sLS9iwTCMwA?X35pOd2<~t2Y_PbHaP8Ds)x^7fmF)^?9VPNrjV- zYQyu|?tJ|BSot={VpX+=rBqek)Zr8+im~qv+o$uK4NqtyL}GJJ1}08Yk9KCD?vIBn zs!+V|Oo$KVWQIj22$X9W%3eGuU*R#lo6-M>t3+c}FZZmO@o^&cw>SS`EVj*38Rgc~ z1HAWV;NqGtzHHg%ETgLY$g>4N&_(fK(L{KXv?im<%Y_O&sg^kf6=&&Hr;w93FO!em z$LUn2RIE1FR6~xh9oMBOsTcOGRR1hg%}J;|9hpxmst(m8`Z7-iE&;%Uo1!|xvL0_b9B7!Euq{v}MHZC6V;oAEx%<}Il^ zDIFr`$n%TO(&5;UzISO=X(dCc^2#lynkI5y9nUkeq1J=VZ$oH-wW+d6CB>9+4__Bs z6d=agq3&2J)4MqTh7^%plhuvvC{6|ubkn(OE6fX0|$0NyA zi5lk2=hRnpXN9$tM!6D4`P>}G27>>4ipBh+%2@o&{?K0C>Kx1_ZHBT?o$b(L1qsrd zfz<8w{o0qlB=x>2M;G(w)n*ka`x!|6g9(@>r@dB>2O4j#56`?_q;LU-N*dWvj|{?W z-n`SYO`=>KT50)X@6bl5@w!>vlvMU;Y2K8xrBCp+HsLI7jVmw7UG_`T{lyaCepH(M zc@3w-&Q&eFe1*s^Aa(F4X23|8olW;NO?#~PIsxF~Ahn21m{F=IQk3#d4Go3vdXS!; z1&W4Ib7U7c$l2s~_fB1_+ts?r{74Ic-;&h4f$~2ep=Pn+w6!1jI4kfovQpPO(bRm} z->L|eQQh)p0&G8J`L?kB`x~GL$CsCt*SciCw>BS#z?`eRB-xkm z4&38!{zUcA@DatycN=ZQ$ne!p55~UqAF0NS^OTou zzKu_PpDn;JDeEsd-=M91*cYl=)i`|XiI2*AjikJ-o-q*v{pOOs7w;gp!};s=-%2e2 z386IXT&z17F$;z?42}3#cD{QYNUCFFpg4XxfAK-GDp<> z{QyRVnVOI=RQq&sBMme3Ucgb;bp0E&FxRy1p32Z@NJ!NA+<|DU{<6@U=^xVYmFtvd z0`$hs`dLAo-`!-s^^HvPUU^mm$HQAq7EmnJg!CL?V6lmPgC@uB71k>ibqqZUF964% zKrJrDmwdV1v{Q%g=t&-6*(o2-L~4AkAb_cv84+dtCiqA_9l>Gf-69!z0t~B_-lI98 z9(fCK|H}eL&6cjxV!kEUCab?!MK-@S_s^8H$_e4sG}EkKcSFojOomgzQp}$;G8=Bi z@t2y@zZWtSUNT|^C??FBD0%~cVj^*$-bN{aVdPZ^tTtA({_a|Y&|;HU^`m-2KFnT% z+*TUpX2zI3t4HXa#9~z)>P*(l@6Z7GEUhRFZpu(wfz6*CY2P)>)O;^R4QV7lRG-qk zpE~Q%6Qa%HH>Sp0j-#Z7id`1csK+X6)oJ&Vog<41eE=Sb?k9?}2$wXu(ztMlgd3w| zS3Am9)k0Js7=62k?u&OVv(P?w76H|bRXdp#vYCU-$d3|X5vPZmQ=z|E%VHuAk|~2r z8K}G*v$9Rvq~^x3)k1xK3opqWBRqA@^L(~}YMbnES8;CXC9SELgcFA~Z@A3j^tO2S2*G>1sYkI(#e)_W?4RuZ7d*EJebrlO1ZSJKH$|D(?4@m_{zd zSTM2zugJFitaB66PhxZ~Uy46c-S49S{1o&Hfj`gAhr~p4|KqZMfblbZIhzNGf2JOw z+UpH^^Ko}U!-64wL7_=m+lig!KMH_cwlhXe+ z!Yj%=D-tx-w6mPVJo6>Q#IxkGd3U3NZ#vZfe0DMEU%I5dRhi=d!ZI(_XViH9RV#@2 z0-Rkjh}zBQUfjro3VVf7CA;qHnb82x|FTyypK}iic4_L(?95sh0c#rr{TcUL%h3`V ztLkhH#`0Ls?uJoLdiQ?sur$Z|ak0vhy7{dXkiQJZJ%AB$Wvc?F z+#L$vd#ew<3u|GfU=4*h^0HOzgtP2`+~E$@*L`1TS=lGt8^e+jrSWb?WVBlU=KziKY;aSrhHu`G&QL(AXu*+#*LmzP~E(>2r{~+t&8+JuU9ycl5j2H$X_Yg3TJ>jjk7UzxU35 zK*0ZxkIwdb8GrC5l=|4bArMw06XUz*vn{s1Ms&@B&_zPpaw9KhltQRUtu!_+?6uMk zra72?$86BG91t+hXf~AG%~ByWoPi$$MiHABi!Snt@@w@50yCJe?+~=WSzwMF?X7lj z<^$mb2d0hz>Oz#t&KW-{L=LM*G|N^+aDJq?Cd6*c?kWPyLmUqA_RHs&G?fkKHnS}f zN?hSXAKM9BLT=aGedZ*kVD_G_>Ea&L(n=V;hiA7~qI}0vEFh*Tf%^{rGY`z}M0r2o zB5JSE;dp?@Kcnck@gGl+0bFs){}P3J!`JWB!s@iYjzWJbxaB9#F3&Ml_nI~x1Q=H+?| z7GDe70Q>S;=OZA;rpoW8XL8_Df2 ze~DVWJOnCk#V~_R<)wa^DQnoldsu^t<@q}fs=nR~Kd7$4>0E2dcx8ZM;@eT~u+`?U zqSoW{;$FdUg9{I!2l~!x9^z<15>8h5?z(D|<1~*Y;^6I*=v%Y)9Ww2mF%?FGF%R!w z=F0tZKM(5w?q&A^G!`~>lw(TI-*LRP%Cx-)#LRuq{C{LUEZvuUkyX^;CG8dinKq<}YdVps(lzbCJ)Mn@-u%&s@yD=6cp+;1ig%p-zFcY4XU|wUdse(=4KffCwRT;2D=pDkBx^3WAM$U9U zkL}RvWi@pm-^XRN$#nd(HKx*uS8wkB1lECQ$qea%y6dCXB`_Q5w=q>e6)I8B;aN(6{MgJ@fl&t0D9qb2d~S5Z?>ey)C` zMlg%ZRLc6}e|&+~UhV3`gEq3@>q3SZ16HL}T<*3x@|ol9#Jj&vxbLA9qP9V0QlCee z#7hJO3HV;MiH&p#LmYv*cUw*foVWvr!_rbxG=xcL^HUbtz`msO0pqmW+082ADaPW^ zHs@z7e$mgL`k16$^N|mrh!$tQn(lKxRD*1q%Y;y#MP}lEMAgoCmhuj3#Uf9ieP#|c_0_Io ze!)qHa-IDe7DSXaIn&H1E|N%|Ijh^9wr)G@?) 
zIa_8f4I$xU=BhyL2#76!^(9{Rd^yI#4R$=t)4WXO=`m-3^0o!!;%hN;!iHYtOCf!botWxtEQcLkS zl|t&9&|fD9QoXoenOe-6=|s?=KOvEDrp6{6Y{tlaTDz?Qhf^aFX>p)(tlI52u6Bf-YJuieHr5TuQs)++PBu+`6*3fiZVY@A0Ov$j*k59rLy=e zjw7)Y)4Z0w>lT!<Lb<&7oaods|#MT9FycrwXhs;eQ*eLe#6K;3QB0w_8ToYKsvy{(EgGfJh`uTr$x zHuQNfLH>m0cM>Xus)V4>XIN;`4o(7+AOl33mCLxP7;c5*3(KoZ@fC4Wsjs?pq#B`u zRF&H|v2g~^vntSPdYscz!2x=5V{ETf*7$aL$u)$orF-1U2Pm$lv_f8bDK$W|8MD%N z2QS8F{1|tX(QtA@#^T;kDyi`;R;H}aI*RkANZ?mX=tu=WZWD>yIjG8Ga1wDd&@?kO zMWWOoW)rh~R?ew89S_`Opi#f_w~8@))n&kR)i<`m%xC*goOvrb0HcfV-o7JVr&>s z^7=L-J+Tyx$0?xbBYTpUr1w_zL zVm%jv27L*l6L0iv(+6s@&O!9sBUxwu3PVc!;@nxE)A^_RTOmWx} z`LeI0r6^I9Z=&eLKtzYwfsWTfD?A-!)*1u@c$7Do6@iw;67a1B5^{^nTJlX_ zl8*8-ZluYuCw&|{e^Gc4U7qwEna5qCc2ikwV*;#To&i;x5 zo?sEuV4snlBZ()z5Y`X+cl-{l2s*{6*5xnnaq zTR~&D{+nxA3KI4_D#-{!{wO^6eZH4D(pmnotDEvn_R%Xzt*e~e`YkZ99BcNox&+Y` z@&=?F39D?YpOpzSpOiRKLqW#`G{}@w)D*tLyzHJ^B|M=#BqA-bI4K=8_T?k#s1p4! z(;XwS1Q*DEr~(myDkT3y6{=0uUn>H*3fbA5e(_+l!Lozjsdo6Od)lRWv3$Oq+%m9G zXd!m?IrHmRVUAPGj16T1rp)W!tGQJ{#c37n+t~6&!Ka_lquZV{%kJ&P130>s!HRP0 zeE`ttrY6Hq{|}&%bqQ#EgF2JQ2RVPS&-lGhSk6nKc{yRkG^+-EOyu5$k)cDg=v!gf zvbc4GbnnYRtGpWwr_7{)^JYq~G3jZS!3%Par9(845yo0(Os9Hgph22mAa6=R$@@^2 zkr`0LMAeCiTrT=f_U84G&l+5TaB@us_Tc!Hjdf5RpQ-xJ;D;W7*MORl^p&S|4ll=2 zT1dD0bTJ2Mnm$@s;Sv zJX1EeU!c|!*+j)_^Mf}?^OfiP;T6`S?}0Uwu=9A9rm^iG@MZ=J+7)h`q6VNUZkaQk zml)2BDU}YF6*S*V==X&d?{i9+;fkOYB%No%cbM+qHCwWlqagOt03v}yu^C9s*5Hx5 zYi^EeY(;i{`YpkewoTn2A$!Dj8rvqm^4|Es#PLZY3rZW3^~FEq_g>7igH}d?xr`wh zE1sLSIhi!uz8w7@ol!=v_X9um=|mo`ZdLusQ}|2t@~Q3J7>ftqy$v7whQDcdExIl} z68!C~(tQ~xuToFaS8x0WoJ#oNu@o<;?0qX_2z5{LV}H+$kAWfRqV)5T&Tf{X4%&7? zdbwY?OQ~`wcQ{GTN1m*Mf0Uo40fR58^)z=q>R#0onMMLi)^oqb-ySwg|EGP{ccuF( zU=#_t{cfSn<~%#V{_yG2B`NyHZ|V`n%{6YVay=eX{xRS!D0jv9S_N2h_@fE z3xC62j6rSVgjm_Z7>IE7i^qYuNj#6S~GdIGF>wdcTL{9+DR;DBOW*`71H5<>`!KQ&P3%8$zmJ>kkfv5`+Wpw4$n6ojuEtu_b?SZ_q2r{+5!pnw1r2LKXYKq@A_mu0!`F#6+^@z#toE_DFT5kQBiwUXD{we1 z=-9h%-#Qm@dfff5X|^ptIs?|EyK8M2DF)jc`C*>O1a@3U+OqxyqJJ@Eaeka?Vq>ON{_B?3xhVq8hN&pbhI`wU9Z|#{ z9`jgblBro5ZUXVYQIF>?=I@Ab#81pFAg@4H;Ed5URZn`3~Yu3lkJ-~z;^hQ z@qR0dE1@J$|Hz}RIDaPNRByO{+i!l>e>_;t)YIvM2St}3Sk-rYv;Cqs z%Xv#WLHi4e{>7}rSslv}uY0Kbo{e-luEt{fi3obg#{Eg~N-X7T|N8{4Q@AQZk-FyAN1oGmy zGMIpPL`N9@c6qrs|sUJUuII!kGq~ zS3xKk&K*QkBv4=TlwN*S!a>|O+{yt{p(K6T)S z5xzPq;5!fDN_P*x-?y8I3O<&^WpuGJ@WZn8?0oZlM=5Lb5sP7ZgiW%V@)?7|?5c4o z8fyT@;d3XkjsWFcX&6(wSSEEU3|xHGtt+ z`fu5zP5^SbR?e*WC@7*mCo(}@UfXKrsYAHiO*uS7Pi+zsi3JTSOMxUsdVSt5c|7ge*P! 
zpzJLYPDSUi&r|>D=@K{JaWt{sdAe>$%%eZ;`?S5#nE$!q-x#>OWgYs|rylrA6_L>Atwcff9L1ujcN6&du-mOOvRO#WcB zVw zg8n%t%0&0944^|?30#)gGgPq&x=mZPr}GAC>8evO5UfSVzo2$ULgqBU&veLZEyU^_ z_Q}nRAj8>_rrR-MW6`adA{gRpF6o`;!p3P%^ zi+4;#zk@Mp=RUWYe)Dr=<5(tQhHKk+mIf6}3Vxd&T2&%7*EWqFiGLbzO669%y8;(#zrUtC`|NBB z-(6?9wvGa=BKKTy>+{Ui=X)VaF#qcjhy3G5$M>9rh|zTn~YJp8R)UdymQAPRD>u7hc^EW&34 z9zI1qsGj66=!ZXEYB?iFZ;F#^W;QBWq#Cy+hy!wt`@R_ti^$>%TY1Tf; z^#h{&D;aGPOARb6UnRC7rfK0u^kV`}?*;7TSRi5C(eZ|aPbtq;%P2{>veo;1JAAMk zi#tl4SoKfCD4Kc&;ZU^VuxqqsMh%mlFQVsMO)13d8Z4-^M@uOgMx3vg`s`yC?nt^} z7pFl`$G(Do&m0V<5BXf$mqMP})q4#%e6?6oimoYnf`lDOb!hTNntqJ&+=w*=um|r` zIg4obSE8@Y%N9IiAZ}kEJ6#wC4z7J-W(3kRdZH8f9AaBW`ovOXNHcMhshM)7h@CIB za9g--)rz@1>T818&hXQbd2g(g)PjL?c<#zBgKlZBEDk;|sJ)z!L{x)Twr&Q=STS2hSN|pQe_G04|89M+ zqz+1;3dy6k!=y+`sE|<-rHieZY#%cyAhwGX-IL8`nX;N45psUjg)o%fwfm7gv;`k6q+ma zb;0ZEcwp?sb5D*gr#1e65}h+#7nrR~@LL9iMCp~^aAIv$yx}|5Mm5P{ChsaVv_z1I z{080q_SNu{c1x*X-Nv!BbgY7cIJqVLuT}*nF8=|)xE3=cQmymbNg>l9uFir5E(GVf2dDFaR>>u59ai`Y_KJz#btcgbFA=k>mm4_EYWF zd{8iqL#W)cT-##Qj-4&8hT@M+NE!nlEV|INq=|xmV6ADADW!Pnd0S_Os={}RpVpi0 zGo%Fz-eH@ML#XYtp6jFfGwRW%6d}1*QNdypSzQl_t{(R^7Y2FZ{$60E} z&CYh_sCD@2IWhGN#sYoU_xi#(%Fb|3MaIL435waX)TDXh6y`R*>+mx~`onJRVbUt` z?C00PciPsa8f*Myck}9n5pM3mm)rlm+Pnd=LO=$<=`qxHIgp)~mcmwgBuH*${b#Li zS{O8laEZv0KnV{BL>!rAUP4b>E&nCw&2+XbEX<^X6c76+2=ESUiT)FbZVVbqs1O8v zYe9G_AO-IlVC8YQ>Eet*&CG;LQuE-uuyASEb>HRAl?BTX9T90oM3KtGS>mpi^2hvw zAR95(M%JaVzKa6|0RBwSo^;8pSo2U{a+{10qhVTRXk-1DTnehOq;D`3@J6@WyAKVn z`7>!w{d7?hUyu5Ac^wmK;U-q&_YrbG^3|zVf2NXorpxZyZfvZqBz^bzv!O*JE&dAZ z4$j^YJdW?u;3c1zQL%A&YHP=N&3dlXu|R)rLG!bWjObb*NYsKtqJJ$tXRL2^!Ja8w z`d;C8$8D%rE^Eu@aA&Ou^0Le#f4z>|(7!}$cu-sko2z`{9u98BO&z)g)J8SrS=39;uzF^ye zi-L`H9OQf+VQ)nf?;rJmDz^kANXg|{%@s%(LgSxN6=gju#&gCmT3e5`I~Co8k6KxA zW)IHvBXUR3uRi;pFGvR(Xz7?(kPf0Gr*HJW

$XrM!NDsiVutG-G717Ci70+D5De zzR{^j0=Pv2>nrJQ%()LD0sQ4%VuQgHOJ{ymNkMtK{OaCqd%Dch zd2=m(sk3PjEoP&hmr&tctQ>976H!BS9!B78d!dj$TQb@DJD)1=4W=Ti?mJ>>7v!g= zWAY#O2!Dy%mx$&mhk)U;Q|D6IJYUHOO>9`XDANo^8KnA-PTMiRZUiW?Rj~jt>@_RN z91kpwf8@93n`x*tD<~!QBCQ5tnF@=2H@a+UG%*tLqzsZW3+upG+zN}D$tt1c* zV5`Go`4KO}oy`Lo!540gwz1{|S>tj zAfW;yP*z$6RI_=jnnEyfiK5Xxc>p%{R#=Tp5~wMyDvy%`UZ;Tm>kMF%^Y3I=JGb01 z?xnz>N(_H6NGe#1-Ymo^a9e2S1-FKs0SdN|mCcj^LC!&%7=cl{=#}esd0p{!@Q0-z zzHV+|D?ACHs#^Tlk;_dPmg~1+9<_2HZF@S2dK-v`GkqyOUjJe4j{)#l{VjHq?5o_b zjpbQfKBHoq4|GFHQ&5BaN+$vbx)Tk$)dI8l#*7EloTBEwTAC^U7E+0Eb^b6j$)uke z!n-!aJwe&Lvkw_G_wqC3GNDr=MhB5*vvcl6mo!#7^ zuQuII?NA-wMZMTMuaBMwLc9nsk2EjkGtVqmXsf18v-okVW(X_;NfF?Iam+w`ys@v7 zD0dT<&NsFCozQhGtt4)$-eS_t*o_(O+))G7#EDTHH4vxs_oQ|{DcyRe#AS+{WcoC1 zal7{wgJg|=CJkh-Ls*zu&f~LPz`8ef!F0AX!6&eJAvYs!e5!GbSW8CY3etTy=JKb4 zqSrE@Lct--w5J_meo4%>+P55!eAANyr)z(xK*29xiRfc46-pSG2^a|MEWNtdMnh?q z<{-WJdkWu=GSE%Wv_v6MQV6)a&H_Ivxxg6-esb(Dk+j9{Ap*c*ASa}57QO)(LCY9( zwHuy9heR&X0sb9=e5E~167!>6ug6XJHqcBST5eg67XukDmRB#w-W#f_TGvOfIL;cu zmUrYCN>n-4n|hMCer#sco%1uJP*S}?-I9a!I?VG4JVD=@pop4eIKX;4XIwHxn)GLw zENCd7RcuOeuLr5aBPL9?j*HkTf5fT+i)!Y}e&B9-22EQu z`_S^XyML~N9)aa!tFfI&YctArf{XVm6n* zTv5sZp)+h!BZWhP4I}~L^4QpxdRMMZW~y4^~9o{)4})tIz{qbBUs#WG6JXt|5|0 znVFYG@VF+5pT3k7_&~0v`dc(4WVOI?pgb@?ZYOnK0*`f!@Q9Nu zO=B+(7Q$W)3%!GnMZy4+<$vd|(UFMFRawsf(*It_wR&|?=dLk}@bClhrb0>f_5*V* z6A?qfKoQ|rRy1Luwx`G@62_aY?yV(Ib) zjI4POnsl&Fa6q~$b*tSqc4MOeidQAzePWjhSmKC#_8AUtwQHc5B(9XMmzd?XNxT-%} zgn-D!wn;U0j-|-|+!hw7*ESk{hY_?Ti z_j@O%6osfF=WHKRsKn{U+{>`+riAz|sX{Q~Yy*~AUvz%`SOa)asU?qT+=^Mh#l|JW>kZYi%jB=1#N6R~-Mg2ue*})}^Flxu!w3cN7%saQ z0O|5_X02pRD!TMP9u6BB(aR0@$yiFY9{KSn%h}I`KdU|C`M1wDLF_`3)?o?0*;m%* z3_v3_3BOy-xrUU6-_!l)k}djlKePTn6PB2$a|g>W=G`K*Ds3|DV*wQg;etQbH*EeA z{W8!4Vg;W%eAoy#>{~2f;@0`HUr$PBi$7T3zfnW@@e6GCQNpL6>3-vlt+q(Tk5vBk zWH)Sq&slsqsi!iIoVWhO1^(WtQP$Zu{d3x6Ve{z^SMK{n6+d2;{pjm=u)X`Z;}iL< zMQ)*mKMX(K{Ev4P+ogAvJ#qPuSLF)rvFE!U77sVii~5LXBTo?Lfx_pL0T#qYAFtZf z18IS@fOpE=hr1#-YnRRg>F>GNExa_nC$&1+S831lA-zWE$1i^1#dSNf-TFp3{?|J} z6G(}5Zt!ph?D^+`_}Y;U+mV*hh6P0rnhNWx);EijrV+d8;!&k8SXnO$M>^Y?ZZj>= z<21`M({!%HB@4JIDmy+A!X0UWh_WZxxjrR@qzi+#^2Q}cg*4gyhKp8NvoRU~4JvRW zR}V#;NuTJ!G}m9ZxMP`;NV2o+4G;lqMQnfi`M6h^n7|?cdETgzH1FqmQP*Tikdm(j zOTVSPysFbSKH7f7k|7aZ@g=9#)9h1$!0O?hWxrgRqokBd*Qy=(z4QG-{-b<+2sYZ< z3X+Wp)ZZ=K%_^a3-JbTN9vLex&v?GL@g%5%kbw$vA9t<8K8n495vA6y+NyuK!-tAS zX$J2Z@uJSz<%FkxiLJUVjN@!~lq`n%PfVpv2efDq?uBs=v$}jHH0eAf$3aAWijicU zD_e5iJtqM_g8E#jJvvT8$8kL*c4lG>uO!$!s8omSD)yXjHn+>WQa$0cFKoz~Gkbn| z^>b!m6o1Cq9mRAJB$xA+*DA%zucY>&71?~ch+P5>1h7CS<&CfHidRY}(x-V~f)o3^ z-^9vPZ{9A2F}~s*%#PiOCLBamlhhD%e3`;rOd1IsF0(@-xM(bdfb=zJ>45$x{Q^SV zvHlKwxs~^`bzU0*#LYB`_fye9{uht~&Lds7swdRRcIFY~s&7eMu-_>bloBi@yF+qsYPm z?51lNCT>dt>wz^TC?TiGM#mo5)i1Q4ne@lGR~wF6xn02_Ds)4esuS*2xmy0D{Unrr zp+LcX<3yLFIq=!I>%{eXOMCw&De})(?kJI3t3W<;JRlINAcsVS5 zrW}MUYS}moG6edePl5<8JT5GP(3^QCr=5EM zsnU_n_Dx|j5Z{@G7^fDqas(Oz_pK_Td{OLK&pj>X6yU30UQ?Orw3a-5OdZ$Xm<$Es ziAmP{Ur}57%qi%tNkg7Akx3{bE&xXz53EJQPI1Ab_lc|u756%L@(U{;id|82GfUo# zx|Uu*u>cp;GEjVx_6PB*&m*vV@N?H=W!E!PEe^v4!7}HQNGnhtuMbg>Ulq4S)8uub zvwOFWI;+coR)n+};!7?$VurN|k0vRp69APgH=SMn5GD~nvB23CSC=Ay>%oVPiFGjg zwP=XxPoOJ;dfCDMK-8tONU%A|4NP_gE&|>KOR_C2%BN&YK973HDt^kSi{JHsf+pRZ z%5y%vrpS^kgS(m1-s}RScz1|Z=VPemf0z4^mn|ST#!D&Qx7sI@ze^b8IcI;c> z6GZY!roW`)7XkYH{+X1-f@qbYsCgdOea#tun*qw$+J zS)zfaOzf(C-mLvf<-K7FyOs5`u5sZ};pHH9`GFuCTI-68ffEA*$R}0@PJh&4ugG2g z2-k{%WJcu+Eq&lEv;7Yx_jh)i3B#|A)YCaXi0`NVSbSXlBIw8Os}4Y{ppQaZ-?!4s zRKfpEnSK2OcuM@2GMfpc%$^qgmn7JZ`%4r7{n5?T+rsMbKq7psGyF^RB#Z7vRUj;f zD0QE*U-;q1YRs19Tl-9-i;#V>dxequH$3b!udXbWz5M_1hr0n;{RnrV@tmF%YTL8Z 
z`1*VDZVWdubD7}cW^nz7_A7&yp;kHvHJV>XetGUBAd6VvkHqIgV7-xX-Szd__&*!S z;r`-y4moi7n7mEX?~3f*)QGRpXDw$k2NmfFE>Lzy>=Pk=`>_=qGJ9HO&s=t|8W{G| zdgU(>`)=33scq^s4+&&3o4^nq{LeoV((7VXAHlNhb1N8YJi{!>xMIaEjVe5r!BlbR0MgrW{;R+mFewaiD@IW zTz6w&+thV~UJTX^Y4Gm(6B(=UwDU3r1yt*hWsO7T;Eup7+=>kEO{D%(i-7}^K+Bpy zTBqPxtDD(VN(n(@hXf@pK7YTMK|Ns|8kE@BXYCu$O$upmB{MMBh_<%(;yu!;oGQ?o zVMSN1ct^|H*Jz)$x2!II4MZza|`1~-{EA~-l zRNabQZZqSl*NzAoN~~*=Lsp41;|VBn5YXg?Rz0{emEhOh6Gre2);KRg;TzI)?m z$YaW(^*~M)4&@u$+uNV+pZ9$C`FXtewb$qJpD#F$9{XmXM6{$htFy<%lg3ma01|&k zX5KO?{iV|ex?hc3%T`R6xki^IP1|;MR^&LP)}=lx;An|_=+1GFR@K`$?)g41wt?kd z2n?tHifwyfVJeRmG|H69!4PeUwc#NBKEJ*;)0cwS2$2qfN#rMOSKiRK z{!-V#b~{cTd>+ojgouJsVdZ)k^n+?N2=o9mgs$R%mOu%yRX+^TY5og~*4~}D#YODQ zbv0?JyWD^soC^qCk6d7yZ&Hdbcq#7$y>g=e7-XE#GI)r%E%+7yb1U%)?6m6g<(P#P z$|F%HZM|q6qSaqNHa~oTnikqHZQaC`LvpvBCrqVG^5m%B`M$lC6(CR@>IEevfFhnr zGmAAnk5wDpPGZnunH`asT<+Ta=?6pXin7RMm>I4o(5VzzO!J0RNzx;V+ZpQH{5c( z)U8VYB?_z(k`o+sH8N1j4A2l40zhLwXl+UvnP{Lsdh{X602Vw0*pUJD)QX4%`QoIk z3!D5W3Y-bXA>4jSXGS(!W;#Y(m%(Y)Rwbz3|BJD=3X7xby0tsOf&>fhPH=a32~IaI z!QDMrfZ*;HoFEOs-Q5We!3pjj9P(G5=Y9A8eb==Q_5mDKS9PydJ-gPu#vFs*NzB7! zSmrMqrOnTWmH{pWdM%tT67R@cs#-2xg;F7c^>5v{6l&+E=F~*v)fE!uyez^s88xc{ zj|ZD{w!J$MWTsBv=cPbBEh35<@-MrQIKmA01HR;9j}w!=HeR_eC=|+3kx`kEE+@5} z)c`{n%~EUhG0n8u7}GyZ01lok@O5$Q8mD5sD?4mkSLMW(L$H)Fvg;8))fK)PnWr;$XbNZ$Vh7-3?14h z(92dt%1v{+&CF`5&Q5j3sRBBW?0Z3Z0%_zZr|+vPF=7(zzhyYFbtwnq=|(xq#aEkc zy)kY(BN}aBfC*Z$hVs(o$~Ubcg<9&bE*nc};|Nc_`iT-7>BzD`jl$_4Ai)H#9*0dF z@-Pa4>JWg}8ohA_KF%qZ zMiO8R1|jTL8;5BXum!dq2bW)Ie~)huZa&0EI#ZpTgQ$(PA5pyMRI^eDoa$BtQ|jJB1>bJpQl=2jWzsHUXn&uW|0 z9byK-Q(;qHE>*i{A76P06?5YnD|X@zvd9cmz!6?ws)H3;pTB1PvUVi5ZKb1vIv*QV z@FyP3RK!#EpgaB0-iyd!Xv_$dOB#||2K@}2C%D#`SR)7|VW->gErbW1_W$$ijE28K}jLs|M@_`Rkg zt$E%GEY8}oIH~kYX%bO`1zQKBO3AXDX8_O*a#6I1o?8&naK~s<8_~Chpb(YOHPEX! 
zzDS|2IQDzfWq4o=my z#1*A2XM)aVgDiKT$1wPBqWh&3DAiWPRVd`$Z&y6Q80-QOXIh+ z@$A1PS5eOTF0LL&s%`Y$BwyzF*p1fr+0StmpC0SC{BPe3Os zbzA*AhJlJ+C!iCTv0Q!wc*+&&Kr$?A(JYGyI?%kxCJLJ6mne%Vd~iKNC{X?bYPW1@ zLtv>?al0cHa;_``0?Yn@5`TN*KYxl{Id%1z=WPFLSze^>AAbQRsmn5W8#|x4a^Wgm z)oy?qIcG=0a!-<-*3(^7Fac zv;K6u9+*_V-LV&|-p$*Js(C!BdJul!v**qCOf2IMacn(%Qzb%)2GL^2E!Wm)W(x%> z-puU;L5r>-kLv2GCn6VyZz#R}MoaI0^Vf`JTV~^07WF(IG)0Hs@wf3Il(>d!{$*10 zaU%GAT0I|)A`2tziR4o7v`%rlUMtpL>QkRAt!gvSEt<+V9tX#$cT&}~!DLW>9V7}- z8sRJ)el;NRLCGQEzCA3Zwa$iJD|yf274Wd;=-lv>Fm~<%bsw|5UxXC$ z%`{-vGx24RZ4D@neWq5XY5W`Lh5;>Rm7LgC?CFEI-|6%M;=`5^Nm6qBh@PM|AxQmb zX5Ux3qQOK)o26qI0{7LYIFvl6qCO_05T2z#*tMKj-ebnF*dbde6BN(M?HOx<>khiF zb0HxuoEA+iNkh8T^;@Ujiz*w@pOAgq)$6|nnk{9CohDi)xc|IFJC7u#KFG0`{#L}% zo7E~`Q>Gn3GOF7gKu~hr+pnHR>2WDd#VGBt3$sNckyWqmTSBIIaBgs~!z!j#`4iR0 zV!N^b;u`1iQ^_~R%ld4fsazM-etfcdvg|0gSkrQO&zx|xO4SW59T`gIp%`X36HKU+ z$rx~XUrhEo@kdrwe5uvgE8ZH7FVP^t4B^zPvF5LyNxz=0Z z$~KAu#V00gV-i|n%)e#c_^Eivg-AzDu8fbdVXD}3bXiyTs9AZt~9Xl~>3ELzm$NT&L?Ck?g) zCA0~g(uW7PbrKbzpyqQP81 zNwX|4l2xQVf*%9p*K6?Z##g-w|4xvyVp7a}3PD&-9GLiol7~vw3P)_W=JH-R{^o>g z5j>OZS~?apmH-T74aNvJGuQ=& z@AGHULzTz&m$=^#F_oxdhs5M_gKEC~KoY{gy{A+i?GlxUnxAG{&ZBY52P1h0qI|5u1JV@z76OLDz{mPKJ z9o4@bPy*7Y#r4CwHp?)e;MH<|XEdqzrhgC8kAW6>-B^=(5&zXJj^m5zg%8$xTW6SN z63= zzNzT`+*-|L!BlZl(f`zPE%$N-q3aC^M^TYcOpvn$-fT2QWP&Ydo9{5+Jl&`?P|i#` zZucmIJp?~7mSJlhDwj}To`Faaza1RZ$Kn3*-9N0OefF|$_^kpOV|BWKQjw$?O7njY z&XrJv={aL$qzXAb-XL>CzHBC{SAJ9I0#KQhcmu0A<8Rt#az%b~$3xo~i~66afVnT( zJO*dtduEd#@uX?J4k=x1=oH;RV>p_keH@3Bgz`@1hwwW}Yz5&^FNrxXY=b}^hgK-n zy{bx=W5JIKMUt)>_&9w_cT4|h89C3}l&-|Pf3>!@$MM3IGuzojtM||mjYKoDPnV6e zyMs&R)URiN zf5S&uW36WUn@-1_(r;GA5k&|oT30977Sf{g#!lQ9c7C7ov*~G9 z&JC?f7ai4_bP7IQ{7y1si3DRA)x?i&cCNOmN=YCrO)1E@Fmt#Mk;L}^?lb+&(D78v z4mUyya2K}CnH6|Mqo@>Oe2o`pW6IV)R`zf&Olm`)6{AX&{62vYuZPfIf&JY%Z9VI^ zgrJ*KuOf2anJP?qmFcOTT{!?!aBWPSTuSn=YEB2tL*!kpsA5LGX`5;$pV~{%ogN=e z?mn)n=R5wU>F*bpojx66GXBmOGZr|nZYCH4j8^-c`TNr_lVGE$6e+N=E<RJJl#br+LgA_PcFwwCJ*bKcVpbUdXYGoNQ%Q- zXd1(XYzB@7K_DOy*Pyzjw$8m2Cm?44q;peKVP+p?E2FJC zPQvv=R6#(uF$4qz)pexc(64aK!*$BvT;6_mZ6`=xVgG8vXW(D%t74$#(t~~h4Nmf+pHnH?)z#HxIAG5l$4yOW#TWOj^$gvBFugVO zj>LVSf8F}cmcrI;BFC6jjHjzKG{H6ev zA1oJ}dh@VJ)=qa8Vk@)F@Mqpkc(Qn3Wr4bcTAdr-5~K#Oq!c)Y^A%;Z2Ing*z+c=I z?4n&!GXt5ELOCD$o>0TjlSHJ)SQ-@;)2BOXjnVh(o^z~S0#V8u&hyGO5oGIak;Ep+wFHMDZp&C?l8FeUqNhXT&IWfN5vd@4&a40LQ=aXBcCn#9#{b)Y^GBlx17)<>f=mixcOXVlQl3l?({1{hs!lpwvu0f`A^{Vufk(&viAMuGB5m z9Y;{}zZaLJvV04if~+q)*j}+x5|sD;VCZlh|nMOBY|k+FrdRU>1Fmc!G;z2`{(DC zm3>Rrt*em`R7r9(`)D^jxk@D&uMI~lJGG#!YAB;F^IAei9qZD2`{KQo77d$Zqq)fn zxhiIHU>7NOcG9! 
z!kLUoYm~;OCLpRyeKsdaWLgU;lf&%(8V9(}$i1`26(8Fvhk}q66SBiqPga8fyQ!}tbcncC}*e&qg<2vZ`zzn$5LYM{1 zZdwU@Z!de~KZTc?^|zIn24o2Q+>jO}jP+2Wlr`?4pHZflAh8%GsNKG4HGV_%dq3G= z6>8b9rYK(#R|hvuNhm{=P!+EqWR{MCcd6%Xut{7O!my|?BU>$#ezvUPlf7~Qi~8wW zzS4e<9_dR)5~pJw<0#28@G9ZWMjNtiR-w_b5xT+%Y-ot~-T+648b7!%)wy}|UA2$> zd~?y zZjPQwd|Vz${9NG8Ia}+{C`F?{2zk;iQ}ZCl;A@D4;LK)L)HlyVV}-AQcfrSo$n`Z` z$!uyQnE2!wJf-mz5uyfOkQ8qFX)VmJy`}>Cs%e`c?6#RR+!%GCopy4=pLe^o(B1ak zNS2{oa(%PVVVGV@qk!l&X>E+)OH>K?Q=Gk*56#w=hj|&+0R)$A(=zG=<~L(dGDII3 zVZKJ$*>3T-L~6Z^>T_dpy-YhL$vV{TuO+K&5#5YcC|_(}?&$m~4h$;j(Y_h9wD8l> zttS`CZfFt3E~fApxS!q$EoU^(61o87s3`moNPXb2cXT30GnDK%YTV&gsc;s5_x{Wu z(3uXyA5d+HD`Q8igu_1nd9*|MFkmBgu(c47x?ufK11Nuv_@BtNk1%a|I1Oq^+K@>r zHGlhm@n4>v+XITfe`WXFM)i@(V>V?0z@wrf%et+Oocx#bG*z!FE0M1qDkSFVA4KLg z%ep5YCKuj`OgFS~Ol7ye1%!SJ{VJcj4$pl7srgOp<;On%Mg!7)|8lxNAnrdPAFNJI z*Gf%ngy~8hqC_`u)yz@>=TdFTQMd^N{Le6YHaphO#fA);C;az1omLAYbB{_L8kI|~ zq15BW$qpBVwQY6}>9UhJAZHNj20|R_|6CD{MH5xa<|W9kV7s~rrfbW=l1SQ2Gx(ON zHznA$EfzX9RXEE}kRu67j{YOW7}h}46r$p&Gl$@i13PvmUTQywy5%}4`VP{*GfC-j zFv`&peYS#{M^oAF|NP#oGlnK4MxKH}V^rYW)WAYW^_le|l~HFz3p3IowfrVaMU-Vo z28&JlH3dcNeJPR6csN8wZXx#%=w4qPqg9#Tb{3X#Hw&X0-qPQEhhe=i&um^#Pzq$CVd3=K{|rbpTv7OU>?NJdHPnX!xi06j zr%DZ-r6Sq&k3r0r9IaZ0=(^a=RlD}o14a1>bW8ZuNX<97r4ig8z1dlnW&@Uvm834^ zwGZ>N)SddPWU}*3wM{Qfc!PFBn)lRwTRyq|hpD7- zmU<^FPNj&>R&5LoV*j4K1~~iF-?KA>8_1DxY!;Y96#G-2o>v9Z*5q}3_mLw_G4=b= z!g&+gGfVrDtGS5sHaC98$O*dHi$XJ#LfsYcK&qQ8R+_pt|(Z^g}xcqz5 zh~6op4A%>KTxb_GAB$x4rcwy}R(Nz^q89!wzi>T>I{t!)yNUlFyLkVLTDJE8x(Gb*9Pukv!iZF= z9`#^%)Y7z3OC7Pfo&817tN$Wsw>zYqBh-Y$KOn?sr)koN&Ef}*g%Ey@1)~K(4^nlY zR6+aFS$24CD_kQqfiDI`s&!w#RNqA}iU7T$Yhk}10AE3sKOp^K__3GW&QIF3P%Lpv zaJ}!hWsrroVl$KX82!mr#Hms#^pxvh@_FF@9P*9bF-d;s^Vo}q%aEKfh*@jVTU-F1 z=O5MNDb&pB#+q);3R6zW`E6gkvqCt7TT^>De^IhItNP3^N`jyT@4(j3FHKgC9dBox zv!k-V+7W=%NM+!XqOuQssr<^^q5lbM-4~klt5*g;pDkAyy~8)egCjNULQ5DKFbb|^d<2{n#ZNELFQNzHyY#w7lTQ}FrDySXQ^Q!1 zjLH{V^mGg>UD!eo*K~l)6!M{pgp;+Km}^?WkvhH53TLOL3Iq~(IN1E9T^KbETbH+! 
zER6(70jHRg2Wgd|_a^907Ck&+d72s=~pPC0R5G4LIPdms<4q%_^qvv zOD)?YaBJNT#-Fz77}aUdUHY7(!>*=Z_%EAI$&Mz?AD=ah6J$$_gFi+k5->_uk8Gzn z`;m{T$s4?}rpwkg)~N0-ltUM0ic=5cHA!CD1HT*8B*HouM44mdA63q*?QAcPAWlMV z4_xbeERL%~pPdn$kqpgV9;wMqeCG%d7`D;>VuP-89H4Q=teE+XD*HNt**^F64YH5J zePcn3T0%>~FLXPIs=iE?l??^cIgPy+%ROxU?9ZSfcQZ}n!J*fsRQLQn>XNn6b`(sC zZcLm_JfGM?tg!eOr|%0GKKK4UXb~!FGIqc#i^)%U#cyyYAVD6Zz_tpGbQK=ca;j$y zaoOxgnE>K3Rj?r#;w35|7(F{due`GuZ4Ew*q*XwrQC9G}TxbDb72QK7| zQxGBw)J{T0L{nkTe-s_1z%vUQN)%E~kVHXBYY6;QMl-$j21gSP^Cn)O)2^j$W%i~X%x$otu$g#>Yq_zY=t;a~iG48iYt2m6QBwO?hO+{>W% zRS#y}fJf^?o)%x)0cO;8nK^W%2y~=Y?XI3~;+bf~S5kekU=WNN^M6MTZkFB4ijR=~ z%#@7D`3eIAf*}e~;i8pQQw~qM|R92 z5|}IfoUo+1Za(y$8r6>5BzZMGGi5xzOtG876}{K-6mlsTO)0>glFWda6+K0dGK!JR zGLAgB=p&03kpc^iXHrNXCV$3x9|2_yi_b+Qpny_HxoBxe4 z>!rvnD=6?_<+jzg-7n|=mXV#l9kw|3PMCdwu-yZ_5U6dK3D8g)h^BdSAOaSQA)RkJ50AQ*4~u!x2<^;UrbqTBJB@_=kY&?1F~DU#Ltq znf-PFb8|TZN(*bRMjn&V0aY>;ygUsC*AB+6mv+&lIrOf9Bdi&~@Jq(eCQU3MD4deP zYy!fbnqeXM^DY;}SW567P=T^m_hS~SLIfFVTC>@Y%_b*1_Nu~0*aM@!gMj$@ z0}1;eVdd^fS~h;Z1CDH=T1D|Dz1#To`yuNGrx)8U+uvmGEXRe93q3C)ae#$!ETCYq zkH4|=8J`v&ul&0PufO7vedO*iKOi+<7+a-0Ub@dL^vyKnaE-0C`M(~TeZ$46J|!_^ zSg)#6cYSbSLBDcIbp30}J>2>eo^A?jEgZWd#Z`)0Wb{n|&ReQ>b!vfrLT9I53Xgk+xPcBXczuwff=@q)y z=1E- zVShT0R4o3*)hAp6r5?ie7uUciWMf(UbWIs$-wu?dV_x*+0dNiJ|H3tlw!O7Y;q2vU z+kv9p`txJHgRiql0rJ8GQBWTgf{0qt{gO^JmdqGs50P>;P3xP0za4u9Mmm7rz;dp3 znF~_SWrJ_D$U>l0%FIWz@iFEIfQHzZ>|hm@ISsXu1U6UC;y@gr6VZ)#DCB^GGB6)$ z$+{W(Njw73I`IPO6$Y0RNPqLP2GRwpx7mh6fp)|_9e}<34OX(Tb_PCy6)X#}C?YFL zz*K^M&E)A5F#Quyr~Y>O5yC&9X%5W{Q%BZM$r1WB&}BvTnn8vr;ttEdP-^x_2Fope zSXu3oF<-+;nVum`bf;;4xLGEJC1O@qBR?dhq9z6Gk|C(xi%bbGP1MX!SQwVTKvq=w ztvoxw1He*Q5kGK^{+wC(o`o zHLZXc>9T8|aAT(~#g@(KL>3LD-z&w~Z4dFB|CoTqM6zMm7|(bzTid}gIXO44HWuR| z6XI!P`f)^TFeyHBkX?qp21(~Ze({iDA6l{@Yei3)^?d0G=8VF{E6}awBML-tJCq|U zMPHks@49c18m*@@#NadcrmXu9I4XKynio;QM+S8dA8kFq%x~#m0tsxTcf9)CQILM3 zDj}h%EyvDdM+NjQYZGg0D>lpFx4BR`mkreUlFR6;s!n6MajH;px?w1jtz=W9F$DL# zPo%Y=jEfQ|2D_-wy%G76OlGW4j>9)<;;N**jLo-M{P%n23mJUNi2)h7{;e?CD$rb< ztRWfM@Va%G!$*whc}%@myV~C;oos^hvu-0-#sMkgF~oic^7m_NF{d*#(~Yg?tUkvU zuw&t(OkW<3+W{)yFn0(eGgKKF?NgbA%&RGNu#dt?vL?B@nK^JvcC1YGv$xquO=J0z ztEvMcD6k467KBadutpi_RFQ^P0>w)}z|<+&EJXxJq0%t#<|BDD?7YBiKpeVVdVbk; zmNXvB7Mu|7OpQFT+1Y&5x%cdtPaRlx^Y&|#YpB4jVYdI2AD>SZBbC@mV!pL?t1i6a zj@$gAOgp%Jc{w-hjE|}H6NKP@>BTvsFNTnYkQEDThemoY=V%@gobJl_b*Ku@$FkWF z#zrg=|0)!fIf6afF?m__DfeHl43;%|+Jb5>zqKDSS&hH+zq)lXrQBH$YrbVG^?;C7 zsqXZ-%;sMNEt)^edjSvu`e6etW&Q5OaC^2GYuhWkGaw862UNH3u?egV#TG_rmlk^7 zh)m64h0ruLTKS2M}&5lsX`Fv}rDTE1!}N5lx0rN8bA6Ti*=JQv3l8mwZnT zjG+Zp4H>KeIbhagi{-9mX9*tKfnS|;(fDsiQ{p2!IN98_y?eFqNyC=b^yQTqu?efj zvT{?2HqvW`mcTK4h9`*~po7bDlSIx6Mf({=Qyv zNJ3arbxcokHm_7JfRq<{wA*V))9GMX)#)J5*NnsDz*LWvR+4+>u`_;giBA|uJ1&Bg zbT+)$!kCL2jc1q+ChqX#@X;|(kzF-08?+J-z3O1{JNmi5{I-tH{;AU0VY{(ApTs(5 zC3I~ta_u>@(YHCvo`#Ms0WzEp&USEg>U1ID5JcDW5~w4+yx1C3f&6s95T0~ML?hF| z%xFs!z06_=@D!csQXc%~;5i~+`r(ODR0HwCuiXpYt#f#kSX$1U!T~#NYbpI5gNK)Q z`(UEB(nloI_ezvZJzjV1^CbGV_%)#%d%yNbLtYdScSNCBkqo4va;ltO^#T1%6>56m12wRv4kY%YI6CY{T3Z$u^~ zf4I7|)^r~gF5O+85!}N9UtG9<{{|B_Fz5G1`c|?Km1brm zF~<;uS~KY*Gp|OI>)qJ}tK;~Vv88l>qWa@6^d-PN-ArW|+dcY99Ws}{La4^|1kTI$ zoaM0@fn}H)XXBo;zpN+O_D#d$V*JbmXo9HKGi_wpTpL~WJB!pzJ=mVANCU!$_q=ub ztb*AF8JBP{(4#6J^naiw-g6=C5rwvB`9%8%`>0<$30}|DzkQh$`E_dL^*P=pqhJ#( z?iAZ}>vC5Q{;e(~?X9L=5LMa16Tuf;{gRkZGh)EX7BO|>mOF4wgfQ!SsaJoW7BR|tTShmY5 zO`7M2O>2?LvSeFzJA!rC*3TGETS}uSAHZtnV|RJ(WHx?9+;^_4zcJ*?fB8$18Ie%l z_#eYRhvOpPP`Mb=Hb>xp6DfT@Ge9G$uAnb?SlY7oFSOjmwWK6bW4h0++D7vtYPKx4 z_9#|GuQ41D!D~F4p5R0zS*jgn){pZt=-J#GYHK}4M)O$`4H`AuSr5MGOzngwppsCm 
zEO<31J_BWT@r)ptND<@8o3>5_=(l<3pl~t)tkx+TpWJ$7V)mPWmU+BJkIN%#wBaa? z8=jqtHSPS`7c~X1Eq`267WYQ;#=})RFmYJwFP8N`pjQ7KGB4K+IfYFb(aqJmZBfSJ z)=@hpY2iom9uZ;b6AzuOKcJL&h`mn(&L5B{oCF~Q%r0rr2b(1x$}|+d0Q*g@OXcT` z)zpWzJ>T&DfSpi4Tw(AiJ;wLBHx1{M{~q^c++M*yk+O2pzqZ7{t_SAE8Qhq8LF`xb z$WMIJ$+8EdNJZ_1PL)z>M56`$_0_=B$%@zE z<+ET0S1ra*z;j?T=-jHGnWoPW??Py6NE&dVoMF4rxqVUOtZlmU0=(cIn0!M2fPPoT z{Q)%^k^?p=1-Q3AtC`j}T~0hK@6EJI{Zq4kRBC_yaMts)KQyt zUP5;3?tHd)VR z6PDO+J!?EmNG}?gCl7Xco7Tkc&hFtI^LYdVA+A5YBO_#qqXQ7fV?`)QKHhsB&|wOu zW~>Y+SFTpjn0!S)Wr<$(35hn{9b=S<@QNiTov9c!WBu_s?034r1gnrO7A@$Z8#unR z^8+Y9(O^1H=$n2GwZ4%E<;dhgVa+Pgt04zFu~qx7x+pY1>el9*n}d6S`#6^5@Mo4F zAhvaSbtk^rbpC|z4=A*&fxuJW8&3P@hIq^bhr`X=HyVFH*!}l^K!Y3eHYg4>+;u)_ ztIb;Vor~WB;q;hHDCv-JR>DVhwRsbfS8g%~&Z_xWOKdKy#DEQjn)Ty3rs=d!Q-Srb zZ7;~qcRjc0(glth#g|Uq=T!;o2$?GmPVo(e+YC}W3jPwm!~o>K7dM#F86nnqbSILk zo{rq&Y$KW1FxNJ{eS5p~P^w)MW5J#_W|J1LbrYC_*|)TDg|_z)psz#hznTYZhKwDjtN z(C(U2(OE}o6AKv^gZcJV^NYdR_OtK9bMni$Uk_krSfSWy(`ncTG2Dky8L6ohaGz;y z?fgxmue`;cU&eES&g`G6+LT;5zL=QterMQWJo;>8e)rQ*H7UmA6;g;{Xw$mn%0`~U z(-KvEXwT~2y{lGpV{)w5lyQH=YiFA6tGkER`};R{Xr3>Ms2gpe!WQ=verenDL_ePx zy(cdBFX_9`Wd_hFD*<7Pzxe*;!hI(XK>B8KHx2U{~18?_n$#!xu12563mMDw zE%&aVfCqPW*au12SJjK|Jt{TK2yn&rwb#`)2A`m;=QDA_a{TS*Q?s@Q;hH97 z4OmBwmvcl>W(%c0nVpzR7p94C5DN(To8juur$0j+$9iiR_kqR3$&2W(5>n+HVV@v2 z>Hq7I$MEqET*{0a`!*3x1;=Tk&HHp76Gbf>i1?h0S?n%NVfm|z&1ICgNI^6#dt z+zrX^`0n$2ivT7_VrG~2HmVdDqj4FwqQwWTkz0p3Ap2%H*grA1@-g3TEPF3`CPxRGU z?Nt1AyH%eSk@acE1vJ}TnvdwAZ!F!{Q}DgJah&V8f@R@=pr=MODrt$RMNL`}7@6)a=n z?c5^Wr}#*Mj07WXgV9dr@4BO}J|&c@m*Lv~a&$wk<(mj(+zX-~z3RQU#Yg+#om=-x zaY6BWywM-f1fDtX2aFcF6!wt|H^+CKCqyZy8s<+ROFg-Oq(Eo#<#evF2J33(circE zi*AnHD|ajU16vsH$q0A%`MbSkmGAIbmeE+!g`S&+IVqD1?|rH<@7o-cId9)#->TZ- zi3eVBNETSdzuvrBx!(kt@BYAy+Cd$&%D22YCT4Owx%rNltp^GTGRNuabIlyDrt7FV zE%4~Zq4Y$4Gop>m6a|yI1xL_h;C|JmLLB>b2aizH!;IPzv8r47>tanmSakk!+w#Kn z6VmUDcjAvrEQ<@xrn`kJ@bMFsuUJhHC3!qR1l=c6qcuM5x*EJh>GJTha@yTrP_$$` z9`7(HqP1lGC7yoO_d@TfRBu^`T=$K#W@%QBOPbu8M`#%c7soQyf4O*X`zg+g4?7&) z>z3yJ;#X6>AD&UNhU*7^*DklWi=eOMZuHMhqs;^tKX>!tqMa;%@qJRhLKOo{gSw^p8&7HJUnpnn?!|{Xc zFuH}$_P*#I8bk82h}AsFL7%kFBExbk)Qe{Gyj!WV$4#VWdM6hsdvddqjCsYw!8kBo zPA6002)Fs=g9sr@ZzsHWZR^FM_vpV-A7eBwNjKKQwe+_d6xx+>z;n2i%4K?38)uG- zBMViyzm^?O`207l{3~Cv2t%IFzsz3c*j!gM-SXWIiZ!(l)h_>#X>7?Fd16~{9n5Qf zLb>w2(w~e$)O~k1PRxursQKzDbau)fQk#@J_S*O8$BC&o^H&9gYMi$>PSL{GLa&?h z2JQ42WQ-}Ub^d@Bw0MyfZ=*BEdm}@iS?1Sw#(7p(#oeQ01>E$whT8_l&Hz=IK=ZoK zY1)@kp}_9}ZwW`(b-NpV<>kgux87ETo3;etAm%Q9HBtk*wxac(aI30xqTT2n=l8f|M|854zaDnSa5mYh z6G_4}TtIT1OovTp>5unsrU==gF)F_dc5o=9KYy$~i8ZoU3kwV9(tLr#G+eLsTb$Vy znSZm3>OR{$g<#;?5-XLd!xP&sASO3pgu?c4L82`%X;{J1{tW=&| z)(NZz?7O93heQ}53k7v^$_}HzJ2OPh4CA1cOY6kPY?dwA97NENABcMR=y-#_)+|ZF z;_bvg4J#Z?!8xJJML~TZ` z)o>}d+Syrotlj{Td-=rXwD(mZF>v~h`)iThBE9H$qNit zEznHtM4fF-!2XR(>lH{1ZhR^={#L`}H6NER+se*xDN^)D$z0-nXHIT1b{gd!^%H&F_x;^9k;8yZn!O~aJ9VHi7HeuHh%GJ)XR zl_{~=0BtQb48P!6!SUGS+s+^O_390qgDR|TNd#*Jr-u}=a?Mk*9 zx`0)9jv;Xp98UYOP#aZQo9$+c`xJbN6f{U%F>*2W^HmXEMP9P^rJid+o6IOpR%E&k zOvALWoYKrYMXCiGZS+#8TUTOvRQ_kODo*qS(J?L1h*oYKF9-xX0I?xH6WmDHs=>2E z6H7eQm-;oZ8qWKzhF`bw1^(s=ZUyJWhsa~pr0HGeng!nkzWJ?)JyBq>2J1OBOC1Oi5pgI>iNtE|NJ$`Iz;-^-$oc(%Xu zA!X`toLipYD|Wm6;cT}0A~HZe%JBd0_1)LiBnq{pm20jtB6-UZ@Ccs!+i z5}qsXwo!@C-sBUAK4@t&C>$Y6YhQ!za**qa z&}IGkZQdlIW)OYo$MW5}UnX4xQ5d%mKG_#NH>2-c-e?!n+8jEo$_EwxpgK=v8B zDcgYlID6u`Vf-8#WaOCbo9+aP?qC*}5X$1PuG2o*=9%F2N~?IRV$|j8CDG?^e-a7# zU@LOPjG&V?7w=I8_@@q2_b2MytIeV8L>d!k9 zijeoqPL6V?{Owm`mm1i_&K+ZB1sCCA(@Hq(C>hHwTu(FTCLu?m*^HZ?^{zbTwsm@`h-N zU-k_x@FTyP?2By$-tI7?{Iu7x+Oqbj^c~oZLF6UjE+-AJHHqF>#CCpkAQDPM^Zg1J 
zx3?l%5nnRxZ=SlRI)G4=(JIHJmHCy7vKy&eev84-mC{hN){!os_QS3)$iKjjC@_5)xUU<8t_VRJ^6p zU<_FG7rav0H8_PygS&L&xJ&<2rB^4KIZnmKm%{A!L~|((hPECKqaC}q_nYSD1f3NW zF}N?)y64U2pdA%7*Zy_%kQL9T?k#luyrWyTaq`tSA_kfcMORr}ksY=wP|FervPbXuyGFHb*YmkSE)uzGhet%o`De487-t3AFe8Ej- zqcjD9Hqr13)Gi6PQJE`S-cTimYjN^e$sN$Ng25M^&jh$y%7@r6EBK^- zCTQP=bDPjOeJ8^>vt#{s-6II&q}VxDly4^6xewRVU^zCayLVCSmr!aaekF-?C+A?h z?+^rgLNoVW&1H!zPRhpV*Wz0_ z_uEgW2gEsYD#${|RMNI|(*b=wE4sJpwANc80-bD+ z9w`Lir9L}~B%36(er!`W7=~ml0&8;4?@aoQR%=e$nThqU;Lj?)z;p*lDxI@TXo^qS4oFS9z}lj?9buAcq@<(IfqWiJ`2yjJg@ocK`t z}n6bWSKEy*gp>TFp+%X^)nhRQU?6=5`l(xFy*FmRn#k znLpC}LTCHl(__%~H?}wFkEAFywJdF8WolV9B0fe7^n?UD z;zRPbwHV?0UIi6~FuE9)@+pnW!x>N!;Z_GK$Enh9Tsg(R{O+G~U~f-e%C%mAl`qV# zUsZB8!e$b^Jx`G=&t{5`e}Mh{scLqlX!~F&GSzl~`0V~eS;`^aJC-EgL&ESw~fAIGj z>fxfp-kZWoS6<5eRp)tv!Z%{c<11AzUy#xhYN28mxVoe^tWs z8-D`FpZWfJ;&HSK5slv0;&SCTE{h~nE)WRdL~QBmxa;uxCW3IrJqLW9W+iV+-$H~39DLhM(?1ga-}=j?PBKMr(4{@R)4BU6{(W6F<8Z>Cpqhb0{u0SB^9iIxM zu*-AXXp!~Ux)Fg9goQx1R`>OXoTfNGJw$roQ74JV?Ck~JAjcXD(wUiGmM64gBCv2W zPlJR9KG_ck+acilWIP{ihlA~q@O`o#54JI;nNQ;uDt2^5a>SVYVdjP z(h7^Kkmmjx3z7B*po^fg;ECs{0_moD0js(0U;|6Ta>L2aLf335Iahu@L+_Kn+3P}A zOC=Y05W81PoyEYfT{_aQOPrHTq3-(r!B18rUF(BK`N=5t+mf*cm{Gh!W!bYnX$T_T zhT6eROq@JB2>~lBaArx1leGQv1{sDFsi5rustD=Ec8Z70EhMoeUqm*0O5`iH#$kcO z0YxD}B@Kgx3)tVIj{{UaQIpw|-Pi@x3yMD!#I8UFX zJ$UipYG8`<-b{Q5&iO`*TN-mjAluclqt+}t(#9nK3(~?qD5BR}+9N;UjK~1>d?dOK z@wkeBZEfx)dOBXrU}RY*Q4`B(+}R(~v3ChabGr;Ly+Rc}@lC z7^u*9e}m%cUDIFC%UFC~h(r2_v19I-y%s^IQc|5pAy0YQiq2bXReytTy{4h+e!FQI z^X!D9fOGDz=i|lzGIA%Jo_ZmRSPUl1L%(y4?Hnu=C016s{yL+z0h{R^dCu0{eLfE zY8AM5%X-?vXCo?BlSTKg(~FVBy_NZTXBiRW@t z%Krd~j{p)WK8Zq={o6iI_)7C*(x1QlHpF|Qo?9=YWlA;xi8nNmezBj+nTN-A=Cu?B zgRZ(QM&)}YKQ!p`QjGG*n2HvcuOWZvQ^}c23UGer^G|`bLaude6dJL4T9l?~l?2zV z?qIO8R4$}#M=AkSbuxNJ4}mnLl^(Q@!tImos2V7o6;phbHBh{gPavD9J~-{0ty>A? 
[GIT binary patch data omitted: base85-encoded binary payload, not human-readable in this extraction]
zq-AtVnFJgyv8+4}mNt>cT1(egZ6gqeAOG`AmC z-Qco`AZQ*BI!os`(k^QWBMfg}qXj7P53^r{bNP8a#eaC#1CRSS^FBvfHM@EbZjs<^ zpXuS?Sjoev3kc*}^!U{RiOF;@z6Hg5k?=*y_s{@YDO=c#o+ItFz^7=oF4CF)V8-2s zxl&DwtW;y)rJOQD7Pk9`{sMuRy^@CJPQZubZ(QVl?nfRRPURP~u>NOI?+o|1Z91z! z+pG$9a~j1k+hE8P%LiKQA|iZfHpt}SlVqwi5=f3Vl2g5_H{tEejom?H95=`s8nXuf z{fq10ZL}8b1GZ1jDe3Kh$piM#RAM7!zGS43h34A1^Q2360!DuOeo+O8pVMIe?aVNC zU)Q$a2#&KeBR`~bMi;~j{}aOLEdQ%aHNxw7|9q8TV7{ov&yU%OXA-rd183BDV#A55 zCqclF;0fpJ8y7FEUXk6Dr9I?X9(HI2bBN94*RPS70 zJ1?EpMJ2mF?W06my%W1T=Sg6jmD5TJh0G=4P+(VI@`v_blBwR}5X9<13Ml zt`+*9p*nbF7=~Vq5oiENAP*d}APAn)eri2;a-D?fVUO+bG>(ARm*8KSNhxg%+Ce$=~wM)_z7c-0*TZ(Pk;8`T9L z)_%d;nH1i7*ZqseiR#p-pvL7-l*1e8lDR+I;ApYo+o@wblAijge9^0@?Am+s-f1_{ z$wG~TTKcE8JxMQxRd3m!vY+#I$-A^2s%g_)8h(o+DJ^0;TNh>0KY(8*FvgrW)o8Z2 zDgDi@vE}O(TT8`iA4<=El~2H9mo1VtnP)b>A{f~*K=Ll#D^JU_{lEXv2a1{SzgmoU_irK2{$7!{u5>Shpnhx5njQxOC3AAf zWSz}x?dVCWs?u}Ks!UAzl^J#^M3JmBbt?WD`Fo7AuI%LubvZ2$EY@QW&Y7D z!&XtHDTdu6`iZ%aJ1o_70snS&(u6>1Jk=Ep^hUf^f9Jlyh&isuSUhWd>A2k+3`fG_ zGdB?@xo5Q>wKE0AFMGD)_5JAI z#SoiwjPOiEZ=kZ=W)&*g)K>!f12|yRZ;yF^%t-1QvK^3CY~B_N=!j~2Qj#~3vioh< z2k7-+;syZ}g!FIC0a|r%{qQVcWK%4RW&KW6v`FMGTKFjqy8)% zvZw`VFC;2TO_mU&pPP5G65NT}R)7w_Z6kDC^;bTBAV z;~UI5I|@4-E(8yy=hF~45>osj=tg`{WEZ87UB%)S z6hWhXWJlAs$@Ny_L9MR#M&A-N6X@6clw0jvdh!21Q57VaHCIuuWPAR!N0h4q={$qp z3wRtkwsSu21^Qw5_j+pKo1v9gT;yPwap%~B)X3VMNWQH~ z(H?6U#1rY*LC6eW1cQG7$1H?ngta_4i@M5!v&z|Q%(cvEx(l)QKEy6oJ;()JOgU;F zDt%T*P@FCJvb>s(uejO&MFk$`f#&Wpq?)fbl*7W zui6!2G>nYw(wY$-VV_-1muYejBB< zt6y4R`l`?~90aIx41KHqbKq?ilx~+a9$EL*@rSr|^Pxyx5vu>ie3;SEq7ZEk{)C!LOZS!0PIWHfjjUmB0!X*7XN9H4tc$nv z;Fx(E|K1AK-J~cjB4Iul2i1_mxts>X)XX}*Q>JL{ZXLz!2-EdyhC&?=Ulrp=y!;L* zd+BPc5K-}Zcb3Ftb~l8iJSA}K4S*@97?0ChvjO(fiv1s3pUS?VY+t5YMUnDx+7W{R zKm>?wXR2rs^>>3M2ljrv`O~#Q`_pkfdUY{vV^5AA{eAS1fFPUOrD0!3?d`gUC|Uea z_JWQRvUxyaeeULM8;9|##%Ck!&UTk8QDU-^sPg*zz9p$Ce-;Df4Q1m+d{T;eqQI%y zG+N-G)v>UY4<5@4@u9cj)nCymE9Ytip23A@!^7uX;6MnK#A?FK&v;6W140@$ zC5nCj4-S|>I&Xi4VDTQtX$qN&jk?Ve%W*NC3J2Qkvfdt8@jc|xbE&NGW5w9t+54MC z{W>KFmLTVKp&JkAax?>xaTwAEoZpV6BN= zZc`m*+`I$F=yhMUx-?YJ;jZ=1FoOvw4gD5axoi6U1=ICJEx#w+>uv!Q0DGE`}a{bA+TP>bAH-w=f6tA zi*KBjm>W2_#pW(5#gWvP9Ktk|3Hcqq8EGA4mhr8<)Hp^qriQ^FSi#Ur#Nf${ZJ3FgL{3)-OfK_YWV zH=4bRq0PQ?6v&z`u;_I52cQ6+zpPVnq*L_?jX95>;uCzf`yt>rB)w2kv0hQpuDX>b z{yAhM+OnD}6(@e6D_kRztX;XRQ)BCb#N6a7=AuO@5zZ{H*_h}O>*Naj*&+EXky22UgLL3}{q94KxSEcmtlKe~pDN$cA| z^X(B;Tyy@;V;8GCUO>gSZU0)<1*6x4D|K(*-8_oGc9shTcr7r&e*o1pI6{R1*b~mo zWJOSXcPx~(nxb4buS5Q+G$-3sNYz~V3NzO1qMr><1Vl917G?T<$wM2KyfqUk#tMbJ zog&D&w6doWAeUS5+`x(kmxo&^=vnvY`_8A!yNG%Yh4Eesq^ZY`g*Y!?F&+2QgoNB; z!f^M=$-3JIv#xgD42K9fnbPIZ1eF+7w&oSLGDLiFm;-VQ4kNH6YkgZE%aiYvZ1?H?>V@RC)IzM4lF#9C-xFS|8U<{QSw{kr~+{@I8-m2<`d zo(^R1TkXkOjt5**#l&@w{gjtO0+D)`KT|h{&%->2*$Gs00X#Wj{co98^yQi-YwlORCMSyyjL>!j`icFXEcwjd13H>>BR4`5Bu} zX17Ko>;-s?=*-c&rW0+lB>8mUSjg!=KoMrbaJlC)3$2jSWe=6RRH7bY*-_-mBrw`m zEBDp?|EL9Y|6Sf*oRa=``BK95sM@$BahOtxiA1voG-0%bh3fU7dH2Eqw4o(7u_Yk> zGn~(k!}w?^31%%D#jLO;jnJ`x{K7_2kl8HRyoM5Ko;~avL&9&AHT@4zAcrX>pg!$S z+lBi^yk14$uV2J>j6N;?{9!1PY2fZ|!<{cAclPa(K3+XT@n=I7@6{n=|C zj~a7oPQpL{=SWp5`{>K&4yhjlt=l(m@&Quc0a#tm^APd=L4!XH+-Tiarb_utr~s*e zd;CR zXX&-1yIFkS3DC`lFDDQc{w_f6SCd3WTW1vwPYIr%VDvtUnH)b>bXoIoMdv#WRueTNbt7l~@*cw&ZV4buvAvV`7tC@JJuMi^&JPJ{BNucGw3$H$s zQpEo*xH4RVwIJ7Ain*z8&9gR3e2;31y~iINIy>KUIRq0t96m~7*RPMnMos$n*1&|> zbqWhw=Q)bYk1*Z!EOpeMsX@kPfxdoeC>C%&fg0n_^m@&+ba+)aL~k~+5<>3XspYs0 z&1LS#pFgE>j$82>y}Wm(z=|;x@`5#?lJL)jel|JoN^^=aIc3T$@NZKrvRz~Yls+$B ze7%oQZ-y=E3wt;;Y}W#ude#L4ncPoNSkvg}`B!zfibOJ@)?2igEKq+fl~h+(&wr6B zX$FgHe*k+*@kUn7t5r#E9vjhkslld6uV=y0?JRZDa|D6r#K 
zaDOd(;^q})*WQ8cNM}WvErFi~%tkp%rGyV$q_Zd*n^7fHG!harezL*(6qYBak47sQ zdBL11O)zZ`45Mx4o*(Hn$ZD^Efyad+rUlLp4str_|IfP#G9x-501oJQJ(rP+uV9zg zqB?a6twnk-JS?(UvmmI`+hW^_DjGkazq@R`*oEF&EX{AuYG+uv0Z3Vm#>XZiXypLV zkkZ~cGc`C1{+j%mh1xVBsP}rgF6ackjNH=VY{n!d;0Fc^Q=}AOGZ;{tN#Gg zP3_xiT^78Wy`>iJC39I|h9#H}_eg`@0l^Xyb4It!E2Pds#S<1aY8tIl5W;`E zHZA)6u?7n76AB>-D!1rkLw4*dt_hRt!{~fH5wu?Sy!_G0=o8c)k|WF@1q%*t^OH2g z!r}YC&(-KLi5O0qCdDv~o~<%_%m&{s{#)p-8CS{cy^dCYa4n^s zs3|+|3EDu?I3(pMZp%!M1YX^yd+ZluVm6-nDUp?E-W{WkEQ#aKIr(u`RfwvNpDT~S z!tI-dr0i88BAm(i3T_e$Ua>ok+u`}OCjFjIuoDN(=Qh_GdSEtw#(sJ^3b;RzGKr&Q zACcQ&ShIlr*z`Ea_K6-n)_8Lt18c$g9mQKXTFgWT!OVInnc~Ctw|emr zFPg`-&KPiBd|F6-!Nl$cPkIsl`_L{fPG~y@xXPgZ`-e&e{%@z^{cmgXgBm82T4zpr zQAiv(L+kosZz3(1H%t?c$7e)P&*kybde`lAy!Vdew{e!eE62xjuZxGCUzstS01_RHRsp=_>E;}?5uH4SyE|5iv-{EyB#nwW+MvmF)$e9% zr`z-WpS~SP-M6O9GF*J)=4Ahe74hFQ%Jv^#8F-r(?w|I^P|jQ7^DHVSHbVh=6WsX3 zpuQAC!Fs5wRpoNZfC>tP0ZAYjfIKn~f2u8KF8>f0Iw3zt7P{2FAiNGb+&`D;8N`iZ z)8-0Iir#u3y!Q4%yPa}b6Di?T4NK?e2sn(X;+2JGxT>s*;+E@gAgjNM%nGyVx-^v3l8|~z?xEXU7lNfosDBd=Y=*ay}E(7x# z0X73Z)RSPWIX!(+Yu23?IpKd6m7;5144mku{|0YUw+AYvO$@AC=(_P|*{tZ)x#zHT zI-?{BiS#6~u1`^HzMMRY7@UHYLf{3B%?SwjzJ72t>$oLR2F`D#${wetHl+8}rAGT+ zF~65+o@Gi^UX?~3)grf|YgTv7J{-kmRyJYSh~M)b4<1}MCM78K7vec>v!cJwlcqM2lvdL{HNv4fAAIObAVwtRgD~~p; zbP?GxxvQjRPT{g|PH2;FC~Q^&=!M$R z2Mdh2=5->}MfV;IwqCOXx6TQxbqF@CoSEu(&(V`Eb@j8QFGV0Fu4Y^tjRe*yDjhma z9CsNLtsM0K zsq8)pkvJ0uG3^J9>u=FETZUrRY>-*p(`0yPA^rh`5>JxHqboVMY(o&c&DaZCJ0ja6 zgvhinwN#>zPkewrX*!}QKZAaM2UoY~xO>aK0Sei#pMhQhuk3_-Ho~1rw~z^J17H)` z7y;BKCY4%7o;{DNLM*o&ULO4*Qxg@PMgR5ub%(;aM0c?F$DQ_WUWSj+srsO?koOZU zMpay(6fZ5jwFiI-cEs9tDM73bq`C?qOEqGs#y4R2ImDrT;pH$5SRkG>qFkmHg4x;7 zXK$}?`SoK#^MSv&CxNgbKw-EFR)WDY1*cJ=A!^M{z zvHy`nBVF`eDDApfKNG%`5Y&Gu>pTR?yomx_TB2iyw9rH^wj6WjB9VF9$aE6_88{L>y%6~IoW<6zn80n~w|Ay$TG^t- zYd@2E(6SE88+UlKaj|j#A&Ng4xC4YDrppP&2p5kY~+MS^y=!D~ql@-GktWq{qGf(j5DmYV>F{lz5 z4`^TA(zt5R()oRk!Yj=v-tb`Zq# zW)Tr#iQ2;k&;(#M=%}d;4E<(5+A60iBvkR2NM>mnVPgneb(7+J4|-W!!34>}V&Sh&Otw0H zeXDL?Qjv6@hVuPCz)|=KDE|LPUeR93Qv5tKKn(Xi1YnvE@ck+(53@Qp2+UT&!&^m7 z$f~_`GsGs1_XEX8d&-gPttFCcGK-=`xJ?&yNEkzFTyl8Pv2L1ogiF+l+qj}(v7jA< ztLXiS{CN;nFqmJ11`79wP%$-DG{W}5uoo@SPB*9QNn*Il>?%^5D~Jw<1vdYgw;#6p*h1fG$WOyx4FVBA*d5`Nh1@HTB6V4VPzZ`OISPSe#(!DlCeW_ zicybHp%Hc&WeAWZIq8Ju6M~`voh+GCL+GzA+K*sWYov>-CgOTFE2u*Z02)r(;#;mn z*uU!qQ?{FkLgKq{3{%5cbpC>PB;G+BD~foF)BT8Xwn6PpvqD_)>-(i;(6Hey7)QcV z(r|}Qr0_>=#A}`x)OM}J>8bJRe?-QiwqdjGSm^;$1nC7bP;qg|u*;ZcsR+8ETW(S! 
zh44Ts28Smjzr-P{Uj#64OTkv6(k`6I{C-L+uJml?Bw>9>mX_ji+`E@lNO%b^rWNY^d~;A4nuC^m3lp6X$y zLJ)@ZX#{>*n8FOOey^!8fxN>+a^NM;bY-1rq{HUKR%txq%Niy`9M;C-8^dLrpOrvW z#;rK2jMy07JlJ3ZHaD+b7cnr#F@R#3ug#hwT8WlKV%QmMNHQ#c9dDj3@4K^7sm3^bXs^)vy9Ze?h>0UWNn+_A3_D(b(KRnrBYyvaxR)+;&9zJp1Je229(ZNARE8kxS&O(P_!x12!24w&jcFN zwYb!XlzCLal1QWE?LAvZhrAxm47S0eBP!V5QsZ%D*&2VnJ>ZY|Xbnn@2sHC)d^|`77d{+O`1Yh zZqGA+?=%yHo;EkKjzwu?)BgcbnV2eC|K#V^AGOq3v z@(@QUBX=pQ(quq|3^;Z6@b-tdP9sj8XiC%;J5w~x8eqdVxBp?c_)Ou3_&BsATP%y> zHbHf{X3>bEPLtCh@NEUt3=gJjpxGyh4J{fY5Iud-mFRRnzs6{c`bVEks*pj8GVDUf zADR52w8mk$LBh&_tU|DTVvAd3}EHwWH~>eMz759zC6i{@-udu;2Os;6VWaRGP-;L>rt!Aw%Cyv~1Syw4p^Y z_7d> zwwblL_tz9syE@2E_vQE)sThk+_1{FtgOdbXu1BF7RciuB9@ubOm91==kfbwWY)p(t+h}{$g|L=570-U|vny%z?aQ)1 zq9?123Vu&F&qhjsR96Zcn6b!q%CYK1)CATAi?rWJ6D^B{6|USEW-kGf;J^Ze1OZfe zQ1DQ{^0CstI}l7b{R4Q+gj&~yM%TA*ixtHS;F*r95~`?+WhSxdVAmzA&D9QX=S2j> zkgH(~NXBv&S#GP1oKI)4!orY1Kwl}PlT<})s_)7XIQ3214uwie8|-K9Kx7~a&rb;H zcF8e$TcXyFjXN+-mjoR)LRi+A&8izDK*EDz%-x`6zS6p{Eq#B;y=Cf|7@H6(UI=QUK__=8+sgogW8_{uNGRh@?Q`~wY2^( znE!?VqTc{uKrl!M2v|5EEZ{#sFmT{E2mmA$Dls&Zkm7d?!+LfFH7D1=gx~pS=%nN< zMvlpix6Eu!m?RX!#sLj|{r}bhFks+dU;==D03#&|I+BI|Ie6XWWIs)1(2}O9*P~KV zC+SL+TJaR5QFxC~AQJ*^m$4D`*YXzhU?q_j>xG-cQjB>tSBg%?>XcLfP?W-S%ZRz2 z#>%fA3#9T*SF6wX@}O|56gK@-&-yPv7Q50*5DN7!S-s}^ga04E;we+<@&ZP+;b5sP zI>!6T3UL7P5x*MjB~i)vXZ|HZlLA( zw1N+^F!yx(&$Jj;#p*wmG=|dxe}-~G?lu)kLdg8Wr{Iu4d}=GNxp=Bl?&wHK4Ab?s zFfG9MV2*EQUWg2(6PD!3Gpqmeo{Xeea_$Rj*1m#MVsdJcrZ zyhL4hiB>Uyi>~R;&8o1}HE07?H|$L*c_ExH^#%Nepw)fK>nvmZnyeolmkgnMwk%^% z(-#*OLpQycQ>NrKqa??0g`GhAD;R6*RoB;UNv$k6OfI&gAZKeOv8V>$Z|Gv_@SBtr zdnvF3)nh#uT75}PMJazJ6G6o%x7ZVjj83TmEhL~g1g)hQGX5JRDQlV5UR!QkInxns zg-Bs#YXmYoMiSj|c8RS69;^zsqI4nku_WcF2E#y?z<-}jOItA|`i>9{=>r9z$rPfYYugJqNpdh&~@ zAuuuSGx|azX|lN_TVT6sXs$QiIx>dUOa_s`(ue5`%IX?SP15Fd+%tiU#-IQfj}yoj z_3%$xvA5Hf6^sz0&f988eNBD<9+HaC3ZKyw#Ee4_f+PwL*q(WNr2JuuH1_Kg(7{&N19vd!ZJ$^&#)bRwT3 zZtIJC8YAx(z;uqvp>zl=#M@@>@ADLbOR$S|mgbfj4&YkCNkJItl4NMt!F_=vOE+j# z<@VhBBA%%0dE{iA#aZHp(qW!%<(lViQ3wCzvK*-J=DZj-l+VU!qP%+-mj0?H1`p!HA%%-mHt;(;A5 z&<w<5n*xDO={{y`AX7EG6 zqGUy0f5W?y9oW#MY!dCH{qx zhqi^eCaeQENp>F54jOrRPlL*Wj`lHCRqbEgm*1xVN(t8I9Mp2;yf&UPjfClg8b^%qpZkl`POHc#RbT;&?7_xRxOW*G$<#AX3n z%iS`CBh)f_O5@3`aF(y8mK@Sz#Y>nYMRkYUXQRR@uT+V2@Lbg~G>`8UDQIi#IW%BG$-K*fELtYH5FdB2Tw4tq?D)qK&!|z*vQF)c? znU(%5HvcHgD(UDvkCjeZ>Zf6i1iJmv1V2^or-A(kAlP#Bem7UL{p+9AIigb;@7)vf z#A+CBdKM=BYe6~-%_DRDj5Z!FkjyKO45D26oGC}I@^He+tbkO$T3I7ZGBKoK)ZQT- zRT_*h2&ORr#l2;V)Kh3El%x>66wb;-Ik>|2h5EkDNtnl#c#Wig6~}!#8?#=UN1cYopl0KZSZ&j&;Mim)M}b=)rxDX0d!3t1 zaq9mHvjt50B`w6Ts)ah9U!4B{XMW5VoR9-oNmxo(#CZ%+t5sPUo<(I+oRb!%N>XX? 
zA1d=`QYlWjB)Ej2*!G1%E9w+SL5h~HQz&#u6(?H~W>sLi`>|uGA4e?|CDFvNL36Yz z32c1e0PV5HdCw6utlcMdh*M&!K5KxrnH&YD86(PW+AGB_F#YZFM884a9DA8G}hRZT76`;x)x`}DDB^F9bjrc$bu~$VA zhUoAZP>ReLY@xHpI3u)_aSg7JgoNC2FLAiKx957Eo#mRGs%1^siA3sKvLA@!NkUMU z<%>Sy09>hQB|KiY4Td9Snl&{nn9vzR%%${pyd>YwTy4kfld?muB`P)o{H?whm+2B) zC|w>K$!sZ2p)i;svUgDfv$Aiw+nG1t8s|xaOsr{lnNwSUmou~;KFO3Ym5|!(3Q&}S zN4stCIS+95&ZPZc=ReW9zwS=s$+2aBaZXsMM?d6{&gmt%!H9za(MvI4y> z5R=@FHSf_DwigbN&jMFHmJm`8&I5bdHw52u1`P%*lyRn0LBCxJ7x~bu6x*K40{;N5 zY&6t%71%>;dB-KXv=ug_j(=$&Tcxj$DVWYjs%a2)NmAX;@EtrLg!*i{qBrio2O;hr z+0?(Q?CIbA8mwFXi`epQh~l)0!P9 z0F{ZhCNv*tk~r3Y6op)pk^r%{BWsNJHV2uZ&+ zaGmY9Gl=pdgf_6y3cEo-jqXUlEZXBi`GO=1JcPf>NZrDoaV?u}X+m_Zt#CZ-qw) z0HH}*7E+Y0JBuBT!*SD}D~veU2|@=z3GV2hv~=;eb{yjX3!7hVqxo{fjilK{0IK%l zw)XD7cP+6`C=Z}6*0{C(wNlyzYKvl-z?`r~oX857API`Mts3|Fq zPozl$sOx{fJAyzH zx}BIQRJ0WcwD6QIsEps{m}HJfX&!*;d7KR+)D-TWU(5sC+DCVeK1sC`^Tn9I*gQON zG?P$9{{So*hUb|G8}jk+`|p2}T|)fuW-gzIJUo0p{eDR`8>Y49r1&MaY&mQR-rIHl zH~A#gZ_j3H{{V@P4-db_^gSJ(#d4Wj&6(PB?2>Yf_NH}!Z+fB)GRFH$!E literal 0 HcmV?d00001 From 4fee8c28088d8f396165236ea2d2f0c62ce781da Mon Sep 17 00:00:00 2001 From: wxDai Date: Wed, 30 Aug 2023 16:56:26 +0800 Subject: [PATCH 12/24] [Enhance] Supprort Training PoseC3D on K400 Skeleton (#2599) --- configs/skeleton/posec3d/README.md | 13 +- configs/skeleton/posec3d/metafile.yml | 20 ++ ...owonly_r50_8xb32-u48-240e_k400-keypoint.py | 146 +++++++++++++++ mmaction/datasets/pose_dataset.py | 54 ++++++ mmaction/datasets/transforms/__init__.py | 12 +- .../datasets/transforms/pose_transforms.py | 173 +++++++----------- tests/data/annotations/sample.pkl | Bin 284438 -> 278252 bytes tests/datasets/test_pose_dataset.py | 37 +++- .../transforms/test_pose_transforms.py | 150 +++++++-------- tools/data/skeleton/README.md | 2 +- tools/data/skeleton/README_zh-CN.md | 2 +- 11 files changed, 398 insertions(+), 211 deletions(-) create mode 100644 configs/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint.py diff --git a/configs/skeleton/posec3d/README.md b/configs/skeleton/posec3d/README.md index 93b526e5ac..2546706e57 100644 --- a/configs/skeleton/posec3d/README.md +++ b/configs/skeleton/posec3d/README.md @@ -79,10 +79,13 @@ Human skeleton, as a compact representation of human action, has received increa | :---------------------: | :------------: | :--: | :----------: | :------: | :--------------: | :---: | :----: | :-------------------------------------: | :-----------------------------------: | :----------------------------------: | | uniform 48 | keypoint | 8 | SlowOnly-R50 | 69.6 | 10 clips | 14.6G | 3.0M | [config](/configs/skeleton/posec3d/slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_hmdb51-split1-keypoint.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/posec3d/slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_hmdb51-split1-keypoint/slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_hmdb51-split1-keypoint_20220815-17eaa484.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/posec3d/slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_hmdb51-split1-keypoint/slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_hmdb51-split1-keypoint.log) | -1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. 
- According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 8 GPUs x 8 videos/gpu and lr=0.04 for 16 GPUs x 16 videos/gpu. -2. You can follow the guide in [Preparing Skeleton Dataset](/tools/data/skeleton/README.md) to obtain skeleton annotations used in the above configs. +# Kinetics400 + +| frame sampling strategy | pseudo heatmap | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :------------: | :--: | :----------: | :------: | :--------------: | :---: | :----: | :-------------------------------------: | :-----------------------------------: | :----------------------------------: | +| uniform 48 | keypoint | 8 | SlowOnly-R50 | 47.4 | 10 clips | 19.1G | 3.2M | [config](/configs/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint/slowonly_r50_8xb32-u48-240e_k400-keypoint_20230731-7f498b55.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint/slowonly_r50_8xb32-u48-240e_k400-keypoint.log) | + +You can follow the guide in [Preparing Skeleton Dataset](/tools/data/skeleton/README.md) to obtain skeleton annotations used in the above configs. ## Train @@ -96,7 +99,7 @@ Example: train PoseC3D model on FineGYM dataset in a deterministic option. ```shell python tools/train.py configs/skeleton/posec3d/slowonly_r50_8xb16-u48-240e_gym-keypoint.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For training with your custom dataset, you can refer to [Custom Dataset Training](/configs/skeleton/posec3d/custom_dataset_training.md). 
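The training command above is a thin wrapper around MMEngine's `Runner`. The sketch below is not part of this patch; it assumes MMAction2 1.x is installed, that it is run from the repository root, and that the Kinetics400 skeleton annotations referenced by the new config (`data/skeleton/k400_2d.pkl` and `data/skeleton/kpfiles`) have been prepared following the skeleton data guide. The `work_dir` value is an arbitrary placeholder.

```python
from mmengine.config import Config
from mmengine.runner import Runner

# Load the Kinetics400 skeleton config introduced by this patch.
cfg = Config.fromfile(
    'configs/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint.py')

# Runner requires a work_dir; this path is only an example.
cfg.work_dir = './work_dirs/slowonly_r50_8xb32-u48-240e_k400-keypoint'

# Build the runner from the config and start training, similar to what
# `tools/train.py` does internally.
runner = Runner.from_cfg(cfg)
runner.train()
```

Note that the `8xb32` part of the config name encodes the intended setup of 8 GPUs with 32 samples per GPU (see the `auto_scale_lr` comment in the config); a multi-GPU run would normally go through the repository's distributed launch script rather than a single-process call like this.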
diff --git a/configs/skeleton/posec3d/metafile.yml b/configs/skeleton/posec3d/metafile.yml index b949a23d47..8a5b58bf76 100644 --- a/configs/skeleton/posec3d/metafile.yml +++ b/configs/skeleton/posec3d/metafile.yml @@ -125,3 +125,23 @@ Models: Top 1 Accuracy: 86.8 Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/posec3d/slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_ucf101-split1-keypoint/slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_ucf101-split1-keypoint.log Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/posec3d/slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_ucf101-split1-keypoint/slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_ucf101-split1-keypoint_20220815-9972260d.pth + + - Name: slowonly_r50_8xb32-u48-240e_k400-keypoint + Config: configs/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint.py + In Collection: PoseC3D + Metadata: + Architecture: SlowOnly-R50 + Batch Size: 32 + Epochs: 240 + FLOPs: 19.1G + Parameters: 3.2M + Training Data: Kinetic400 + Training Resources: 8 GPUs + pseudo heatmap: keypoint + Results: + - Dataset: Kinetic400 + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 47.4 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint/slowonly_r50_8xb32-u48-240e_k400-keypoint.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint/slowonly_r50_8xb32-u48-240e_k400-keypoint_20230731-7f498b55.pth diff --git a/configs/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint.py b/configs/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint.py new file mode 100644 index 0000000000..320d37898a --- /dev/null +++ b/configs/skeleton/posec3d/slowonly_r50_8xb32-u48-240e_k400-keypoint.py @@ -0,0 +1,146 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + in_channels=17, + base_channels=32, + num_stages=3, + out_indices=(2, ), + stage_blocks=(3, 4, 6), + conv1_stride_s=1, + pool1_stride_s=1, + inflate=(0, 1, 1), + spatial_strides=(2, 2, 2), + temporal_strides=(1, 1, 2), + dilations=(1, 1, 1)), + cls_head=dict( + type='I3DHead', + in_channels=512, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5, + average_clips='prob')) + +dataset_type = 'PoseDataset' +data_root = 'data/skeleton/kpfiles' +ann_file = 'data/skeleton/k400_2d.pkl' +left_kp = [1, 3, 5, 7, 9, 11, 13, 15] +right_kp = [2, 4, 6, 8, 10, 12, 14, 16] +box_thr = 0.5 +valid_ratio = 0.0 + +train_pipeline = [ + dict(type='DecompressPose', squeeze=True), + dict(type='UniformSampleFrames', clip_len=48), + dict(type='PoseDecode'), + dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True), + dict(type='Resize', scale=(-1, 64)), + dict(type='RandomResizedCrop', area_range=(0.56, 1.0)), + dict(type='Resize', scale=(56, 56), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5, left_kp=left_kp, right_kp=right_kp), + dict(type='GeneratePoseTarget', with_kp=True, with_limb=False), + dict(type='FormatShape', input_format='NCTHW_Heatmap'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecompressPose', squeeze=True), + dict(type='UniformSampleFrames', clip_len=48, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True), + dict(type='Resize', scale=(64, 64), keep_ratio=False), + dict(type='GeneratePoseTarget', with_kp=True, 
with_limb=False), + dict(type='FormatShape', input_format='NCTHW_Heatmap'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecompressPose', squeeze=True), + dict( + type='UniformSampleFrames', clip_len=48, num_clips=10, test_mode=True), + dict(type='PoseDecode'), + dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True), + dict(type='Resize', scale=(64, 64), keep_ratio=False), + dict( + type='GeneratePoseTarget', + with_kp=True, + with_limb=False, + double=True, + left_kp=left_kp, + right_kp=right_kp), + dict(type='FormatShape', input_format='NCTHW_Heatmap'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=10, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + split='train', + pipeline=train_pipeline, + box_thr=box_thr, + data_prefix=dict(skeleton=data_root)))) +val_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + split='val', + pipeline=val_pipeline, + box_thr=box_thr, + data_prefix=dict(skeleton=data_root), + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + split='val', + pipeline=test_pipeline, + box_thr=box_thr, + data_prefix=dict(skeleton=data_root), + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=24, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + eta_min=0, + T_max=24, + by_epoch=True, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.4, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=40, norm_type=2)) + +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (32 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=256) diff --git a/mmaction/datasets/pose_dataset.py b/mmaction/datasets/pose_dataset.py index a06a7f7c0d..ef2de64bb9 100644 --- a/mmaction/datasets/pose_dataset.py +++ b/mmaction/datasets/pose_dataset.py @@ -3,6 +3,7 @@ from typing import Callable, Dict, List, Optional, Union import mmengine +from mmengine.logging import MMLogger from mmaction.registry import DATASETS from .base import BaseActionDataset @@ -29,14 +30,29 @@ class PoseDataset(BaseActionDataset): For NTURGB+D 120, allowed choices are 'xsub_train', 'xsub_val', 'xset_train', 'xset_val'. For FineGYM, allowed choices are 'train', 'val'. Defaults to None. + valid_ratio (float, optional): The valid_ratio for videos in + KineticsPose. For a video with n frames, it is a valid + training sample only if n * valid_ratio frames have human + pose. None means not applicable (only applicable to Kinetics + Pose).Defaults to None. + box_thr (float): The threshold for human proposals. Only boxes + with confidence score larger than `box_thr` is kept. None + means not applicable (only applicable to Kinetics). Allowed + choices are 0.5, 0.6, 0.7, 0.8, 0.9. Defaults to 0.5. 
""" def __init__(self, ann_file: str, pipeline: List[Union[Dict, Callable]], split: Optional[str] = None, + valid_ratio: Optional[float] = None, + box_thr: float = 0.5, **kwargs) -> None: self.split = split + self.box_thr = box_thr + assert box_thr in [.5, .6, .7, .8, .9] + self.valid_ratio = valid_ratio + super().__init__( ann_file, pipeline=pipeline, modality='Pose', **kwargs) @@ -62,3 +78,41 @@ def load_data_list(self) -> List[Dict]: item['frame_dir'] = osp.join(self.data_prefix['video'], item['frame_dir']) return data_list + + def filter_data(self) -> List[Dict]: + """Filter out invalid samples.""" + if self.valid_ratio is not None and isinstance( + self.valid_ratio, float) and self.valid_ratio > 0: + self.data_list = [ + x for x in self.data_list if x['valid'][self.box_thr] / + x['total_frames'] >= self.valid_ratio + ] + for item in self.data_list: + assert 'box_score' in item,\ + 'if valid_ratio is a positive number,' \ + 'item should have field `box_score`' + anno_inds = (item['box_score'] >= self.box_thr) + item['anno_inds'] = anno_inds + + logger = MMLogger.get_current_instance() + logger.info( + f'{len(self.data_list)} videos remain after valid thresholding') + + return self.data_list + + def get_data_info(self, idx: int) -> Dict: + """Get annotation by index.""" + data_info = super().get_data_info(idx) + + # Sometimes we may need to load skeleton from the file + if 'skeleton' in self.data_prefix: + identifier = 'filename' if 'filename' in data_info \ + else 'frame_dir' + ske_name = data_info[identifier] + ske_path = osp.join(self.data_prefix['skeleton'], + ske_name + '.pkl') + ske = mmengine.load(ske_path) + for k in ske: + data_info[k] = ske[k] + + return data_info diff --git a/mmaction/datasets/transforms/__init__.py b/mmaction/datasets/transforms/__init__.py index f2670cd929..3d1ee91e27 100644 --- a/mmaction/datasets/transforms/__init__.py +++ b/mmaction/datasets/transforms/__init__.py @@ -9,11 +9,11 @@ PIMSDecode, PIMSInit, PyAVDecode, PyAVDecodeMotionVector, PyAVInit, RawFrameDecode, SampleAVAFrames, SampleFrames, UniformSample, UntrimmedSampleFrames) -from .pose_transforms import (GeneratePoseTarget, GenSkeFeat, JointToBone, - LoadKineticsPose, MergeSkeFeat, MMCompact, - MMDecode, MMUniformSampleFrames, PadTo, - PoseCompact, PoseDecode, PreNormalize2D, - PreNormalize3D, ToMotion, UniformSampleFrames) +from .pose_transforms import (DecompressPose, GeneratePoseTarget, GenSkeFeat, + JointToBone, MergeSkeFeat, MMCompact, MMDecode, + MMUniformSampleFrames, PadTo, PoseCompact, + PoseDecode, PreNormalize2D, PreNormalize3D, + ToMotion, UniformSampleFrames) from .processing import (CenterCrop, ColorJitter, Flip, Fuse, MultiScaleCrop, RandomCrop, RandomRescale, RandomResizedCrop, Resize, TenCrop, ThreeCrop) @@ -26,7 +26,7 @@ 'DenseSampleFrames', 'Flip', 'FormatAudioShape', 'FormatGCNInput', 'FormatShape', 'Fuse', 'GenSkeFeat', 'GenerateLocalizationLabels', 'GeneratePoseTarget', 'ImageDecode', 'ImgAug', 'JointToBone', - 'LoadAudioFeature', 'LoadHVULabel', 'LoadKineticsPose', + 'LoadAudioFeature', 'LoadHVULabel', 'DecompressPose', 'LoadLocalizationFeature', 'LoadProposals', 'LoadRGBFromFile', 'MergeSkeFeat', 'MultiScaleCrop', 'OpenCVDecode', 'OpenCVInit', 'OpenCVInit', 'PIMSDecode', 'PIMSInit', 'PackActionInputs', diff --git a/mmaction/datasets/transforms/pose_transforms.py b/mmaction/datasets/transforms/pose_transforms.py index 0420f4ec8d..8627a79f96 100644 --- a/mmaction/datasets/transforms/pose_transforms.py +++ b/mmaction/datasets/transforms/pose_transforms.py @@ -1,12 +1,11 @@ # 
Copyright (c) OpenMMLab. All rights reserved. -import copy as cp -import pickle from typing import Dict, List, Optional, Tuple, Union import numpy as np +import scipy from mmcv.transforms import BaseTransform, KeyMapper from mmengine.dataset import Compose -from mmengine.fileio import FileClient +from packaging import version as pv from scipy.stats import mode from torch.nn.modules.utils import _pair @@ -14,93 +13,65 @@ from .loading import DecordDecode, DecordInit from .processing import _combine_quadruple +if pv.parse(scipy.__version__) < pv.parse('1.11.0'): + get_mode = mode +else: + from functools import partial + get_mode = partial(mode, keepdims=True) + @TRANSFORMS.register_module() -class LoadKineticsPose(BaseTransform): - """Load Kinetics Pose given filename (The format should be pickle) +class DecompressPose(BaseTransform): + """Load Compressed Pose. + + Required Keys: + + - frame_inds + - total_frames + - keypoint + - anno_inds (optional) - Required keys are "filename", "total_frames", "img_shape", "frame_inds", - "anno_inds" (for mmpose source, optional), added or modified keys are - "keypoint", "keypoint_score". + Modified Keys: + + - keypoint + - frame_inds + + Added Keys: + + - keypoint_score + - num_person Args: - io_backend (str): IO backend where frames are stored. Default: 'disk'. squeeze (bool): Whether to remove frames with no human pose. - Default: True. - max_person (int): The max number of persons in a frame. Default: 10. - keypoint_weight (dict): The weight of keypoints. We set the confidence - score of a person as the weighted sum of confidence scores of each - joint. Persons with low confidence scores are dropped (if exceed - max_person). Default: dict(face=1, torso=2, limb=3). - source (str): The sources of the keypoints used. Choices are 'mmpose' - and 'openpose-18'. Default: 'mmpose'. - kwargs (dict, optional): Arguments for FileClient. + Defaults to True. + max_person (int): The max number of persons in a frame. Defaults to 10. """ - def __init__(self, - io_backend='disk', - squeeze=True, - max_person=100, - keypoint_weight=dict(face=1, torso=2, limb=3), - source='mmpose', - **kwargs): - - self.io_backend = io_backend + def __init__(self, squeeze: bool = True, max_person: int = 10) -> None: self.squeeze = squeeze self.max_person = max_person - self.keypoint_weight = cp.deepcopy(keypoint_weight) - self.source = source - if source == 'openpose-18': - self.kpsubset = dict( - face=[0, 14, 15, 16, 17], - torso=[1, 2, 8, 5, 11], - limb=[3, 4, 6, 7, 9, 10, 12, 13]) - elif source == 'mmpose': - self.kpsubset = dict( - face=[0, 1, 2, 3, 4], - torso=[5, 6, 11, 12], - limb=[7, 8, 9, 10, 13, 14, 15, 16]) - else: - raise NotImplementedError('Unknown source of Kinetics Pose') - - self.kwargs = kwargs - self.file_client = None - - def transform(self, results): - """Perform the kinetics pose decoding. + def transform(self, results: Dict) -> Dict: + """Perform the pose decoding. Args: results (dict): The resulting dict to be modified and passed to the next transform in pipeline. """ - assert 'filename' in results - filename = results.pop('filename') - - # only applicable to source == 'mmpose' - anno_inds = None - if 'anno_inds' in results: - assert self.source == 'mmpose' - anno_inds = results.pop('anno_inds') - results.pop('box_score', None) - - if self.file_client is None: - self.file_client = FileClient(self.io_backend, **self.kwargs) - - bytes = self.file_client.get(filename) - - # only the kp array is in the pickle file, each kp include x, y, score. 
- kps = pickle.loads(bytes) + required_keys = ['total_frames', 'frame_inds', 'keypoint'] + for k in required_keys: + assert k in results total_frames = results['total_frames'] - frame_inds = results.pop('frame_inds') + keypoint = results['keypoint'] - if anno_inds is not None: - kps = kps[anno_inds] - frame_inds = frame_inds[anno_inds] + if 'anno_inds' in results: + frame_inds = frame_inds[results['anno_inds']] + keypoint = keypoint[results['anno_inds']] - frame_inds = list(frame_inds) + assert np.all(np.diff(frame_inds) >= 0), \ + 'frame_inds should be monotonical increasing' def mapinds(inds): uni = np.unique(inds) @@ -112,63 +83,43 @@ def mapinds(inds): frame_inds = mapinds(frame_inds) total_frames = np.max(frame_inds) + 1 - # write it back results['total_frames'] = total_frames - h, w = results['img_shape'] - if self.source == 'openpose-18': - kps[:, :, 0] *= w - kps[:, :, 1] *= h + num_joints = keypoint.shape[1] + num_person = get_mode(frame_inds)[-1][0] - num_kp = kps.shape[1] - num_person = mode(frame_inds)[-1] - # Ensure compatibility with lower version of scipy - if isinstance(num_person, np.ndarray): - num_person = num_person[0] - - new_kp = np.zeros([num_person, total_frames, num_kp, 2], + new_kp = np.zeros([num_person, total_frames, num_joints, 2], dtype=np.float16) - new_kpscore = np.zeros([num_person, total_frames, num_kp], + new_kpscore = np.zeros([num_person, total_frames, num_joints], dtype=np.float16) - # 32768 is enough - num_person_frame = np.zeros([total_frames], dtype=np.int16) + nperson_per_frame = np.zeros([total_frames], dtype=np.int16) - for frame_ind, kp in zip(frame_inds, kps): - person_ind = num_person_frame[frame_ind] + for frame_ind, kp in zip(frame_inds, keypoint): + person_ind = nperson_per_frame[frame_ind] new_kp[person_ind, frame_ind] = kp[:, :2] new_kpscore[person_ind, frame_ind] = kp[:, 2] - num_person_frame[frame_ind] += 1 - - kpgrp = self.kpsubset - weight = self.keypoint_weight - results['num_person'] = num_person + nperson_per_frame[frame_ind] += 1 if num_person > self.max_person: for i in range(total_frames): - np_frame = num_person_frame[i] - val = new_kpscore[:np_frame, i] - - val = ( - np.sum(val[:, kpgrp['face']], 1) * weight['face'] + - np.sum(val[:, kpgrp['torso']], 1) * weight['torso'] + - np.sum(val[:, kpgrp['limb']], 1) * weight['limb']) - inds = sorted(range(np_frame), key=lambda x: -val[x]) - new_kpscore[:np_frame, i] = new_kpscore[inds, i] - new_kp[:np_frame, i] = new_kp[inds, i] - results['num_person'] = self.max_person - - results['keypoint'] = new_kp[:self.max_person] - results['keypoint_score'] = new_kpscore[:self.max_person] + nperson = nperson_per_frame[i] + val = new_kpscore[:nperson, i] + score_sum = val.sum(-1) + + inds = sorted(range(nperson), key=lambda x: -score_sum[x]) + new_kpscore[:nperson, i] = new_kpscore[inds, i] + new_kp[:nperson, i] = new_kp[inds, i] + num_person = self.max_person + results['num_person'] = num_person + + results['keypoint'] = new_kp[:num_person] + results['keypoint_score'] = new_kpscore[:num_person] return results - def __repr__(self): + def __repr__(self) -> str: repr_str = (f'{self.__class__.__name__}(' - f'io_backend={self.io_backend}, ' f'squeeze={self.squeeze}, ' - f'max_person={self.max_person}, ' - f'keypoint_weight={self.keypoint_weight}, ' - f'source={self.source}, ' - f'kwargs={self.kwargs})') + f'max_person={self.max_person})') return repr_str diff --git a/tests/data/annotations/sample.pkl b/tests/data/annotations/sample.pkl index 
ee61c7125247ab7d622d9ef6528ce01e88af99c5..63a3834b7a158cbcbdd05f4a78618afeacf7a4f7 100644 GIT binary patch delta 37826 zcmbuo2YeMp_dcGz_W}Xwy(&@^k!S(|LT?EWZf<~u5I`}cabKm@b-;b|+_?($D^PF?foHH{!JF~mleshwVY1XRhqUypWk#!NXos^85HM1hTsJv!YRl0eAl*}Bxq-e#gx)qsgv&{BVGHtb) zyL!~xqE*o)W(O(Bm|Zz~R;0#KMxEJFN=ny9^U^D`OY#?HnVqC0dr9rWoTBQq!upkF z=fyT#rl+l~T2Wq6mlnx1y9g*eZ{A2#Umq!}sWZDu$-Jr+6$RsJO4p8_W_FX3S-E4Y zbJs<3=2vE#-KAt&*23JvNd25e*#%|~DOp`-Eiwxuk7JUYvoK5KP$?uvA?kCcpFl~HWu8D{ak8ndsIq>Wya zS5v#DWY*$zv!9gY7t9*Fa?PByF&UX=e<@iryP$qekv_7rKFb^+C7GF#;+3O|>c-4m zXY{QkPp^>r(=B1YP%HReGAT9{)LO)Hu{Bdxf=94I9f1!b$pL~A2fQK@;bl*|~t zG^0+RrV8|QbC8sz)u{5)>LvBH`D@IbC&YiZ> z9Lf?qrF2n!(c;LO%)*fc<{<(qTa-1YD!eAUV12|qR7%z@9T&|t3o}dfQZwEe$E{mg z5-HQi&8e$14-?F+tcsj!Gk5OF%4z0sDH$_wWci{wY4d9{R+xuN$&$3<(%K?p@vMyb z<_IaV*ILoSd1Y%yW~eMPO^Whz*3Kwco4H_NVY+#Ql+=zah^$#(Shpm-z&uh)O4AE+ z$5xD*w@THR=~9vrsWNg#XUr?l%`r2iWNyLevN^T+v!^XwX^xbVQCS7`W6DbAuPiJz zM@h+Odue{z%95)3kz+H>OexA%3-U%4l$9A{>detnQk*|0Bdw%fuR+frBPAuH3&u_> zDpmD4>E>7|SzBH>cii+<MKJ zB^3+pdD)ArO4qG0)|eBdC?~3~Ey+qN&0jy?oG2xgbF=g%OXjc7ugozgNy*}p8P%mr zOBb(Rm}^d!lG+6`R#zA6)tML_Q>3K8%uP#MIDNjpVud+XO4em(jtm!$DXmJIW=@lm zMRn!&;<3@1wb7D@IbDh}X6BSutV^q^nI17`NXhhqg;_P_6|={oTV+d0TIM(_vY=pL z_0kn)j+B%x4bNL>70oQ2S!d3al8VakoT~Kl;`)j;<}4{$F{gCVDnr-T&dM=oOG#OI zhO!sbM6GdFrkN{61=DipR3ozUm(-YZq@*;nJU4r7)uO5yYs|2ejLMr;T0W}0V*S{N znI|PVGqNhzRTr6Q`4KZBCFM04qswZl7q6*WV&+Rpe%-858LP+6D;qiAoGT@{c`N4? z+ORUl~G{Mmy&{-`8k=HGjq(m`KB%?`{76Mr{-Ugc)ho=!QZj8$S#A2*+S<%frREYTu@+4sp7!@&# zrKDt0_R^Z@{5jF-xn_x!)J>nWa`dXo^tq#J%u*@Io?no^w5nu&ZqW*}OiI$$*M?{3 zt9-kzI@2tVgG$$@a}uaqn{HM}Nwh)_FPuHDc5Y;vxk5^^W|S3_%$=QWmDick@;WIi zuCA`Jiml3;>N>M>nQ08$V6Kc`P*)X~l&>;ZMRK&oZ86VRsN$;fqOwY5Rz+J$No7^n z6-9N?;@WbvIxnDvWWBgJL9vO2?Dv)Np$o9j+C*X!o7 z!@^o5q&1kwIU38Xb+r)Ous8sL%8};rTTVIIv~_diuy9b6O*A*<_sY+o^Edtp38z@* z2_;*is~4BY%&DPnJ8QH>f$|fh_?eS*v%xYqmq;s1YStFjmE!7Po*Y$CG5C}ytk|NP zr@B_07LEUK#YppX{_q*Pc_w~1x+y>T7ti7^?i?O^G+>|2pW3RM+bpxOWN~}gZ6Ok? 
zip0@%<~flOnyh5%i&s^andfdWw-1{&WglSp7v4T=%7>%mAG4oS$iMKpv!;B0KL1@1 z-ZpE>SI_X@v*9hPS8p(5{-B;0{(SN9=0QC_+S&=K8`BFI(;KwS=7o&uQ5QuoOpfWr zu0t-NLoU_L%ixgkpMvaWlPb=@1Id++wU_nt`W-8-k2i;ab z#8&Up&E0JEZ4Ktbo`3hS)sN`rqb;}kF==&ehX=H>$Jy#9bn{8ed@9+$PrJYL41eia z-F(jZrRP2W?&UANpqnqY{7Wy%FKzAgrDng(UwTD1U$xBF=-(0I-`9QqJ;n3yiJpIt z_xyXT=ih4j_l;rERq$7|Dw-OV$tDYv&70BM=$lc~wf!yH{H370_8x88rE`E_`GwbvFMZ8e z;x%KI*NjPCGsbz%$Ye9V@^_Q3!#_x$@MhL;qHPnsq^^_9tlzr5WIuaJ(Rb1P$-U%1 zZu`GyTYk{ZAJLY`4or?edEMk^2JaW${I%ua{l?%e{xlfd^keYA#_ezP+<9uRyMp%b z^uQmw`KM+6mE2|ic79f+V6Mh5D-Efx0-(z%#`Sms!QY4vAyiBC^5Q#Bt=SfqkQ}tqPjB=uk}Ex ztDd0jf!iBYFIKqMG1VI!oz_QSeFfH%)B2%CoVH49dgPK|jn<<-O6au#B6%RFr4DM` zI=owSAZr{m4rU%^3=-5}P{)iRtVqOeC^(vNh`p$+66&yyL{t%)^Oekm_nIC`Z5rT7$}BO(K}%z|o821vf!( zEqQSwYQ&45?7Z-h&x1YHBoxqTlSOa}sHLVdmSxjed6+RnP}!ia89A&;1al@h znlVdovjx|Z8M&yT89P%C9~H9afTImzk;((LRD_7cD9i`5T=_XTbq`0#FL zifrtVo;qkgN}}zPf~KPo>lDOovsxfQTZj^40Z4Mt3MlV*YZ3GCRv}VdagE5H|4D;d z!iq%9mV%?RjuP0>0&B@x%TOaR%Lp{iPo3SkeDYm8-_01K^*9C<^jncASPqJx4{<^_+_`a33Z>BwN#UzQp*!X*3Eygz7#2u!`06R|IBX5` zaM)U;x>^THhwW}q>sgTq-?8B6u;T=_L0~O8?0D2j_+HnX@QvTOIBSX4!$t*tw^0;q z0=3i$6222z<5+VN^RT8tP@6#=YfffGB7CQSqcvLucB;TyvgS0@Ncbj>9~-bw2S*dm z5UDdkEp-+ViSV5b?%#xOE9HIR+Xg<|H#vNbD2;YZ3g0;>bi;S9gl{`ajB`Pf!xy8x z4VE!!8!sB?4>7VV9yt!k4)GU4{}m?Q)U40@PAh zGH&)&tZ~e^nt7OUji9atbY+BQI!B_p>4q&j-NKkq-*& zA%V5z$X%!rM_QVe(wK4mfNdG6JFlKGU+b|OHFO+3tgAhsmU=|S$j`C?~# zw+VILZ9;*!9?kUDqg{C=e2MbD*u4xsyf=9~zJk(d=cM?(ib6MjuSxt~M~U$oNOJt% zpuFR-H<^dS-a@LYw?W~sNDNDucUY4M;Je`HvG)YGPjD@H?0wWo0Gk5Al-T4Q!GjuW zw|0)*`l8k`_S}nF``9M4C@b+H;x1immBFwqFsy(B=LCVY*s&lF-DO{E|UL(ZzP)f|zz(2<3WG>>6#}(XD^RcVw+83) z(m%c#YeRWo=T8M6KGEs?hr4TuwkVBuP3rvZP)KKD*Ge4_Xq+~-r`;Y!#sMJ7y}tt$ zIPLGqJlfw0sjfPMvR~bS-rt2aiQeB89Q#!_!F3m0OZ}<`YNYp%j$JxQ>(N-hTIji4q(=VU}DjY3BQ%I0QCR zBZx&yjudP3nq5d-F; z9@J9D5|Ie=ao~Jm-r|M%WG~E{yfClv!d%2KZ=k#{%*TU|9PI4+Hy(FF+rc(UqCJyB zypb^H0<|d~;uBD0YywFR@rhL6`0FI*;jadyy4nm%e~ra>J()F$5T62${@NnAQw5jg zFA4E!sF4tVw{zZ!KjIb>JRNq1$ejs_TTEh-Vt6)jj!j#chfUj%>Z%dcwdowzBw~0j zING#ba52Fp*%XiAd8m>YzOY#j+UJ9#Nf(INg`k$Yh=@cCF9zp};XhtzTtazY3@-&A z{!3b1p%M#?%TOBal@!OzQRoiUE8=my5=F)pAjxsOiV7T`UClgvb`4TpT?-1I@iyr? 
zRwTlBJvh4T27%ouuq2mB7d;?|PDq*&fYoMYGR z%)_poNOg4wsAJcitVo3NE^xG~Nnm#iEZMI1TwdIRDhcItXS|@<_kyEE_lem3pq6@o zh(stK1ov-3`4HuOq1**N(#si+BNCgwyHOnNofON5iFG#c_rzoQ2#Sn7Ajz?OlnNZ5 zJ;pqI_Bc{qJps!8Ux8`&N!BES`4l)h?PQi`m5yR_PyV=d0H z`zY@V>igg$$Gdav0hhKN`~ga$eUjq&Az{uO`%yfeAEU_l2qZb4pHPA0uTPnWzdl2% ztIt6>1{O4^FIbTX=a=B4)~W;OVdZBKI9A?kTd#YyZb3`cT)!6pV*E79`{IY4fRykD(p;RW4Ua!f zqJk)m#`hX4tDI*KR0xHP;Xd5RwF=6OTx%2=tw54v*oF!mhov$PhqXnjt9GDt*u4$v z09GVo*d81m)Iiw+dAgFr1ckch-E zJQ$pB7@p~c(RcK|(hFm*7se3`;~>f#spuKulZ>`VqWz3^NMz2OiCee902u92Vo0t7 zAx_(epo(oDid0vJP!r-&+Mo`lOm`r;Yw=tPb}a@r=3jqXco+rHoWn$5IH;u#2Q4xB z`qYjf3To2?bp)uR_DE!ngX3y<&bgtFW~UPhg&6`J32LcPpq_s+!TJ0%+w)Jh*Ugc2 zyZH>yKhx=-(Ugzx>a2+G>h$+^b!NanV*rTn>PXJNt1}i=Y)BSTU5%qA_@`}y8c!K# zSEuo)9sTVIM4}xN1vv@SQjM$Y1%c!Fqc*1&Ol&K&pf zWm$`}a>p+l|L`SQ4>x3~8B~Cu$rc4Upq82mYJ2{g1=i=URL@@_&tE~$Un!oyQs}SQ zl#lzXIPR|j34h@cGL?(^xVt15ch`~f zmVkP0S_;nRrhl?JJ(g}dit^EWA}Lz*-spi*d0L3i7#xlA=*R~QN!=%e)H0UB_G6Ih zs)(}aNLM$g<*ay=`y(^&I9JOlCJVbu1Xl`bsWQ-#sQU#fwxm z@UDHDT}c+I48g4gwbUw5&jnTBd@jIUvwJU%r_H*g(;0c?OJM|4W3Th zDstOEE!9X&(#7>0;vAFCWgaGNN2;qBsAJN3tVmp3&j&}7E)dv-0!ubYF0L1$N>)g3 z?XI;i21A1`5vfZ-Ep-_IiEHcS;CxXW>htIydBh1J3;A}!}+YA zx&xfgFKD(q@_TxI>E!tZS$FB!o_@KL^2VhYQsKW0xdR_$Tn^rB$^C+T0Mt?sf|eNmyW@w5g2r8f z+70Sxd>C1Sx51oB20VU+X73>s3Lg>Zqo9_04Ako~kAw5M<{uCICn)b5_)mh5^#8Ac z{}hU&gOdjS)5K;4;Kj}AnfSne7DdK0Ajt#&IVx~Q+4Ic90eg|^>IG0ZAaX1w(-&Ej z82K-O<1l+!aIXk1d6;qJzls_e`Pc0%d}>F~ehoam_PWTu0cxo?iAfsyZxQF%^fvRb z=^doHdKc8S={?pYhW*KhNxmUMSygt6s>*KZT?o!oVK5NIPE*6y7~_&d-6>U>U&ls;`jqN`s_!6{Uop?pUFM$&!~|& zUe5ie19#4Rd9!B!fw2J z!#sxz{|q1u8Re4m59^VrVpB#T)m0`{!8aq}o6!{F0A^FJ`KnVeZw#?$%2>f>f#N(9 zw8Zca$niu$;{-uX1a&n|Lf+ueqegB2fS^4Y3^h(cs;jA>mYN3YIcGXJpL0(4rpnX2 zsj|VFD%X2cWg(q2gYwby!rw;Ej}D7ofNdeUgk_^RdeKNjQuk#PmBUh)J`<^~W>FN* z=muxZX3bT!yVnmJ12c2U!tOZ&41-!K540rezHXu-gg{rmK<0wFy5_OwHtKrkryBzH ze6rA`3vdCbr51vEJ}3a!vJX!2eBe8ATIl(p7k#jZ^0*(=ct3bp{C<$nITWIt_k)rq z?gtmM6oxNBs;i}x#YA7#ppIgN+z)Q}?Eo$3XtJ<-nc$8AwNw$P?|yJOAy8E;kP=Ww zRVgdve(>4f=V*2rS*R)(Tm`75R)E3<;a9jQjDqob09#J(q7Yg4g6Z3cLe^aruA~Pl zDet>r8sNj1$W;aViHAA+i7QbW9iDW(T!lhTG8{)$jk}-h)w&ubMpa|RU)?HdDB;X9 zwalZjCQ{rsf->O8p!3wRCUJ$d!0}47T5$D(Yv~HP1~qbp%xb))v{!8ZfuYpK3*KlS zduo!_x$%qA-i^o9w{C1#)~<0^RVenv-@*2=fmWdP&cT203G`SCSvqu`{Kk4voMJY< z|4KLeIMz58Z(tr4A1^2y)U|jcYZB|AP2gzp34%LOa4lJU5^BU^uA?8`Hlb|7gz*pG zIHCW9J?Bs8IR4?ACge_NIwPQ65wshCq-cbr{aK_TaY(X^|B zc4jE=N`ldrs|0*CsHLs}ZGOeLmMEyaPEgl_x+-r#-o0WpUC>p#vXy-!!BDwFz&C+f z>Sj>7=Lorjyak-kJtugR1)ipKCyRC7C8O4xEJ`_9+)8=hWN{n#@MLGQNIa2#J4&Nz zNt4A+6v~PO9qbO7Ebc^!aR(=h6?aj>8G233qsez8#hoT7n|vj_crR-blf`}DIP~rp z+yjDZY3MzO8X0=$w{Cp@gPyV4;ac~mzx!yab&rQoMThMY6}v$#^)TmxvOTPE40?ol z81$&19s_j^dYmh}ugijm95k_Ku8R zMxi^!zamk46(zk%D<6!w`!XDPuHxFor2HCP^E?xFD`;w zDhTR%Cj{>Q^bUOHc4uVW?tD7E(~9zjoLk$PA*a>phI>Ap&BaJ-APhO4mYn}Qx(%w> zmQH_L$ z?24?xb7N}kFb6Nfx9&#ln%He^wQhEzxAdZj_Yyj`?%m;yCA~YmPxAcXo0R1N=-!m~ z4evhS!^_+ioU?%I%U{m$&B-|-Ou;2NO17|Op+CcC5ddJRwQ9F0cMLDPZqX3AXSxC-*W^pvC z*!pEib#)9iA$mNOE25BdXwh`!aoPugyyXO=Da8UV0ku>qX!B!>GNPceTu>FDuF4h2 zyXO{7%WQ3U(2f!em6ZZEKrOWrl-}th>&{i+eBL?38wSX_QyjAHl#i@C<)6V}P(^t| zR+|$HSzcxuvY@1Qa^Rh601R1BO3uHYtU(os{{Ji+>#(Al^ zc)xT=Q}86MP7Bvj3T?7PYBi{(>Oo5k|I%;`QP90sQ0qV)-RqGxxCG?fzq>&jAFz)l z78;Ke>;_Ov9S`dDAsd{}OaHhJZKS**M;HAxL(VEj8*(~u3`4=4iZ-Z-$nW9;l6m`OmoHy%-sd8J1Hwg4I1*CUV_+utM5zHwfCD0bOCTtd=& zLbXdmjZ?QlS5HWT^lY5Bt!wP;o{$D)b1FL~LNMK>Mbm zGqjicO5WADbIj4YHY71zI;Xgb%l_bmd@<)q5B(P*-w}D#f zc0#aQcMEsxc7pN6bdEO>PW2{2ytwF2grmHP@JLRCcThgQ_FdgsN)!VsTsq--q+)w03Ti?-NCn)`6NoCSxFuKDHOVgxKGDB{4*#ro|X<@_AF(bKJ*;( zX!G+(b+s3iZJvh>iWgXu=?e~bJb6% 
zk-16>G;aK@XXD8odNxkQ65_b;wL3KXXVlVZzlfG!K`r$gC6WU9J8_PIe=rXN|3s>* zzd#)W|7JxZjw$$p8x7Qu;wBsP|L4dyh$@NWeLre*19k{3O==}_twAl-hLA)YQ^Ea@ zaXgztur1|%acl=Zyv>PY?CoE))WHX!C^|kVi0x6xAaa$`p|w1l+z~}a2ax2m_fAyc zIIJ`Ca99_ly6Otbp1%NV-)^i)gtR+2daQ@wdJ3+kkoH22gtV;bmS44K(C&=_x~z`~ z_65cHEkQ|9>`$Cy)BxsT)PYEKbr7g))IioG!gw$^8Z}68g9VplRD3uNL6wB@%0D$D zXb%NTn+_4VLqRPyjF3ba4+G~5<8j_himW@6;y}=yNwMea&ZGvzIGpl^Y!n_~$j;yd zBe56A$CD2Sz>vK_$@#YeN1%$WPeY2kZfZhE@(_|EDa1WLt|W&x?)#t@cVkC1RR;o} z1oF};hgM~X+(=M7R18{T_&4`5iGudgf*J$rY9EWd!CgEqvMy^Cm>smU2!_gW0v-=) zsR^K-qb7p$IqLtMx7K)$3eZuLD37nGS`KgG5^#wsOhZONW5 z4m%*wS+m3Bp({^-5l~CzgTe{nt9aR+3&!UIJeBQkK#li0%VAz;8Q}S#1AQ=$^1d-Y zAAI;BXUr#FNzhRmos=}}7ogCcyBEfXeF2J$g&@hpei0QoBdm~lG@hTb#4D$uZ2Wx< zYAGucqy8vx9AHNaY?;842AE9b$Dl?={bO3wGaUj?YIYGy=(Ob`Sqy5a5~7lZeJOE{ zQDw}-sB)ybssMG2TEU9MsE>l9QI!HS1eR=+EG}1~N=E&_&Vk=FdlgukR3&oNpq8p3 z1fxFwfM6{+Um&q(=myevyi?}|a)lSjWelWAd0!wE`0!XKkg>1222uytp(q+Z@$pUx zEEKwv_v&~Q>rrH^21$?f z9^*>NMva8=h^B|S1x60GH==+p+a!V~fLiKAf|8Ef-ZWI|!Aj!eJhYB2@-OD_Db{|q*-49BiUD%)=U_~OB4}zo5 z9un9tfhGHl!*VxjB$%~Y(|u)uI?a9}U6FMIS;yJ+Wy%}#LBsbNI+Ex#V*wJ0P!85quYfTMNF?XK-n~jV8vhzn zUA+#<#^2GP-XNS8HZG}d#U|$9rn@TxTea|;C}VrxLW*w>fm-Sv&=SLc$$FP4=zUL6 z`#>GN?;~sQ+QfClIV%G{X!Zx-sPRLjy7~wdUlRiLJoE`TpNH_ouInM+OL4wcr_V!* z9{QB>@onBo@onDe@onCo*yfF3RrMJF@oiqo`L}sLM-^N01yWsoNloz;!B>=Vw|Q^J z^M@L@m13Kwh7M|ausU#-7XF%|XxKL*`Yk99bU~Z%^?pYbe&9cX`X1Ezfgg}f?Dd{k z8~9MOeNm>A7wD<+1^Ntc zfj%3Z>30C)3v|i(7wCVWiY@sQDQ+^U376f0SfKw+A$Ng}*Vngi2=;0`^^NvTx7G!= z2J)~MC(Y83;vLHe1;a( z^Hh6qK2PCG5^ks3tDcKb2KzZN%*%5qrtuucnb{0CJG@6|>e|AQpdok=1 zdw*E~pP@yO(FG)V{_I8t&Q#T%d3c})QhbIMlwG9^pP^+%V*cz6j#E`1f%O$w@>Iq7 zvma_?{`@E<*6Y~7r*?l7&}joi@IX*}g_fYC>2n})j!g$M51R%d)zx57$EG2yNKBtY z!O^Be1a_#vl5LXda~P^<)U{iV3uyLX;Aqis5jz|dAD|^7F?*(g^Mw&#@p8k6tUG%y z_AZ0E7sg@i@JCSI2w)FA{Gky<63sC}NM!b$g${ot07h$+7?Nv4h|~6TRI%+DNOd)m zn$Y3rVdHZYg*pPt>GOm(vHkDiz+uA{t$hCYkx z`o{@sJgBRG0`f+`xc;Vfn*;Z?wI>n`m6HTK8Prl!Ks{GY1?O{BqvtAQT~{I7!ir%V zT{VsJhHQQo7_!?r(~$j5?yt~Q(*ZDKdsA}$&CMC8VpFn_>MDnt;HnfX$!AiCyO?xU za8Udhs21ltI05R;v)zHQ%lHjC9Pl2;Is$FK0AU#c+k^2amF*S+3`-O z+3`-Qd%Ux-F~7cb^Yfg}&5m+H?om#Ep5iQzZCs?`1gTd5r_3*Dp%~6?M#idnl=C*v zoNn!f10ax4<6ay9?duijyZvPtw{MZwKi0W7&Wj$j(U7UVaagsyH-w4;uzFr0PO*0N z4vc6_Z|u6g4-cRUp?cs(oK>aw!7ptXAM6@i&fKn-ai(=yABcZe2$g&L1iCj(i3QsC z&znW3BIL8hskxw*ngd#5_&0IGM8WlWf{K8;uFpr_;BGB1#<87&%tP(D1ViOK0nZ1u zln(0kq6Of5z33m`MJ%K|b~a{kXJc04`|zp&<+Qf#IZIiA791rKM}t~w8K`fI;}}Aqv`8S!K^>*VtdK1Z{hh$s znq5L3x=ICD25PBtP|q6`;C$X#>y0U7-7$r%JEnY}&5)(Z3d+aFRAGEf&GyC=-Ff6bJv!!95sao(3%CZ%CdX8Ru94;t#Az&i}yLaj0TbHXzm2@zezGoRr$2YzjFK ztSxTWJwD?^b_54u#%b5WjMF}T+>DdvR5@vG+`qbW{CqioA4c}T9)X_jwA0g_ak6=O zd`Rx$V^xR2A zp?wX4+6?Np?_}iNr|0hK7R=~vpF%7&ZV~LMpq4rf)a%EmgY!k|ACKxYC?9`Dpuh8s zz?{T00`f-8nLxy!5s;k!8G*A<#ipE%R99Q63ZZ%$3&(8~a-I=rnmQtQbSSTpV6^2N z0iO$MsqLW6&&*>)LFIXZIv>(f?GxN{VgY#P17ZMDW7YX=cP)l6`>bd7qaR0_V zmr>s6gQ+%LWc1~kOW5d#gsY5wvv3D;IRM50lo*mbkPv5;aRsW_lq-?y>MCl2cShkM zs;en96i`lsW*2tkfqZ!4KGuRPhqKFt3_!&rLiJk+@d2b1#$a z*oFHx>%v{#%VayZ3#YOR_hlFE$u8WR7stN5IQEad`x35OxGtu;*Ts(UYvTSzxH`Jm zL_9yy8`n5o6Y*pRFP6s@@>;tQPhRXRgt1=~YQ38t8Xx?tL*6y?1EO@T_~ANGOI;6I zVjS-C)(u3#TQ>@72dL|mpS&vaZK1hy^50Xe?klevR@;CtTK_WT- zAbA2+Y|4{Jb@dcA!B-tHg*;6mcaWT&_|{0%Ej7U-1M{Du99s3P$UO&&r{qDKcdETa zLHi4WdJ)vs{u1(Tr&`R9d^O%ScxWurKh(BqnF?MNuwSMqe&7`meHGMFuYr1QdmWt5 zZTLo|dy(`##D?2b_accsclRPGU*39y^1d6o( zekb}aii~$alJ7*{qXMUE?qePfc^|2+J^+P7!rwQj4_T486a5Gr@A^I#*e3!@zU$+i z=%=WWJJCgfrt|BAO9J+1D52Xv7s)R`E%hZ)Nw=b35$727HS;j)8>G7W7Su6nKPwWq zqThj|QU4Lx_X0~cN^V7eK$Yy|ZCDp94%k0}rAa@D+|Qucd?6%pFZwGuUnE<;i2Al< z@e-vQ$=R~`LiuKk!MTaW;0?1e0DcD`z8I98e=+z6s@V8Hkz(_OnlJ$Fz+~|^h1?sg 
zX>HL%XLpNTQir>yi$w2`pe@F1) zEz(VVZ=rNTX>?vv@9&I4_WqmU+Agt8>$wkuBBKjPa_{d(1x`on&OAKO11a`lKK(YG*T4Ln+G-eS6jpGD09@Nn|0a;^# zqw(hpgZOCXL}HVmfa{i-^ zMW|vk3Xx(bftoOAS7FdDp^$slVY!cwWHnuQW$>_I-crh;RY!^3(V*B*0BwHYaSTz= zUL>gHpsw~}Lvqsyaps%jP{pQfK#J`J zYQhM!vAaN-*+9}sv#>YSqiOd?!7*BRBPGzDO(JmuC~oUPON@M<-jj%e-UdN!26gnF zjI2=**V{DxlVGK0pF%7&ZV~LMpt!9ErH5L}3lgV;^LeP;TU(ZT8!*1(pd#<$x`->1 zGbnGIfQ{zJX_|2&_of#aCy|AVX7T>zOdyQSC@~~=G9iwO&O#NNayC-j)>9Q+6vJIT zWzGbWE?TMKMa=F^dp`><)xwPwL5t22k#j+Da}Qc#oa0j*BMOSo6V&;jj^YcDH7<-R zZd(0CaJyz-NGvp7B-o2Vac>U_ABDf-Q%aYD@wo_RhwkzlCqC{TmG46bc&o}?f8^3d zmr>qm#0gjA0L_qx9}10g$-+f4DP0bPAx}F<&i{DB6{uoMu0)D^d#b`%TZ&2PY6|f| z2Kp%5os_ya&HOqzAeeUz<V z{lVQq`(}cn@)iN#3W}RyP;a>14$kMPmY28xkCoa^$~XH!z~aQWNpIjY=63)P|3HA` z{2vIo6IE==T}W{+Oiggqjd0Z66mq{1aQu(KPNBSe2u4%x74Utaxcdcd{)K=Ch=R%o z1@#c9t8y3eiBAN)@oR8d$lgsbR6Z==J)pSz1@*l1D7gR8JMx6KW_vG5)Gg1aCpc_Tp{;AGqiqTel(Dx#{k|u?Ee1hAPqGXGP_6ptv&zEiwGx zmETJge)t7Jy$I_5@Jq-W{D3^W*uFsM^`QMS!BF{%fL{g0y)dZf#Mi<7k4}`Efj21M z?DMWm6Q3)+grA~&6M*>lN+lQnUTOSurEj5%O?ewB9(JcDOjn&6)Vq{%zE&E$wN2=d z@Oy-!A^QaSKB%QW0B!z>(hrG(!jA;?F{q>P6J*^ll*SHD4IOTON+=Y5CeY78E%gPc z=b10T`8)j@`@un$_)6`d#kA5BgDf&(HsHps*+Pg61zD9BMyD^5O z?jNUB->?*xe~VOC`zeaPOI$ zXyQ-L{zM3r{w$DRKwYK3B5$;IF8;5#4;>J)eL zrhNS3e_j0Ie{|yFFS8Ne4@!w&{3YkV_-m+QQvyiwX){nZWiHttzV|(Ni*ZuK5LX|o_xg5t3R(B?CBf1;pqfS?Wpbu=D?Y+|Cmzh9_R zz#d2}G#)J2L7zGh%t+)DTQt`n9C|ijk0KZv)KX(WJ&%nA z=ku8FO9H-+C;DEq#UE&Kd($TNrYy?GpRxET{)|OY;u#AWYU2QiKVu;||1%cjQN^ZA zK&q>W)C7+O;IT;*a-OkhI%#NVT_A5V!Dz}90Z#?R$IU>SKUOiFD5#tvsBBPIWe)O* z$0~A%h3tSmlVGTvCE(ehmdXY7yfX)!&pW;cG2sWdKl#3wR_}R77LZ}e$H&^H_*h%+ zeK(1BA$b7A$C~8)V=aOzHYFb^UQnkdjI~D^)I7?#pKSk}C*(~7M})4@!t*JC_UIz9 z02JRo18qLy3W$Q~>b@XxrDly_Z9TCb1*h`3o#-)Ni3e-|ZgL)oX2F~Z9e>_zm zL;3g;JJnfY7sr>_nSn?)zmHl3M0|-YIsX!SIjY!{Vx+n%p(?lte~Yh_GR_LSDL*6B zDiAIs7ELJ^Yy~L3iU!(zSsf(`8Y=~5fI1piBAZxMUot9$P2*L>LSvO+t3mN_J*elL zT5$iPbL6QUlk&~FPf7EsS^*I6K9ciypE^{rB^FX$t)?dU#%fUYlyRr(_G3fiweT8Z z(UP@-T?cBZ^`Ol=&ap&6<8gx80P1Kw9@#|4*_s87woNQFZWQb$Q2e!CQ1~XipSNZw zg7LX#p||Fl=Z&lx-kN8eH?of4ymJ!e4Fj+4gfB2wB8iq7tB`O--7_$KvuXgqs78q) zxf(*874>FRu^}fT)zvA~1lLS#P+KTd4I~2l<2awf zGl_!2vjlZEsH1QzvIal#MupE!2wiG#BNPf71$qvsrOpMVV_v6YwuA9GW~}F!G|w?Z zJjeLHWG!#j#3&#CwC8~Mr#;KOPkU-`%y|IBKkX?w|EE3AM-|&~0a84XPfc*lLOhXA z8RyHMv2~L|uZ1rr6z#Y~pqGN;liZ-qKka!rQBZh=psoaU6kdg_`(@AAg_A>X*;f+^ zh1Ur5T2M<}2kMQc>%sXvGu`t{rstXAo@e@dU8W1W%ng*sGWiEAlgs1FWS))Ph~nsX zBMnIt%j6v_h2=LP)z!@ub*@dfu!hUzrkbfCE0B9Dc^H11Aa4i7N4P*^9-bl{>=*>f@VpDb@)zxllLKnft&%=~)pS-DYp4w7Ks`S_ z5ANUiX)on{$4@VS4_k{tQ+$7y>_za=6;kTG-t-a*@z+l=8*NrEx0QEOUqO-aGD!0A z)2meAOiiyb4+p%CR9A0+(gB4H>P=Q8PM_Wa$Ag}?1@?}>k`8+0otSq~Bd1S~ro@Ki zg`TkALjj$(PXylw#oaAIV!!uEqz{R6Z2E|K*z_?{U3~)T*z_qY5{FNpful{I3+xMl zCEFxtuV11{4xiRULTxqsE3h=_Ymxf~)KcFPf*J8@&WQWL_``|l`k$lKxX8uB`L)No$_&kD@gto{Rl@ih*849WT5{{9|SZ2J#Lb@d}PVH%i< zCF)NU;(Okl2`-!)!p_~#1fwOt2>4e}yuA%tV))}m4b)NrP_L5*!TG%N4=022)kpk)D8lEM delta 43475 zcmZ`?2YeL87iRB5Z=s`rqJTyvN`N$?)KD)8a1aOx##{==lD3nhDxQU4Z$v$N!w%M{ zv4ddm*n98YKlaA|``+H}Z9)CiUYhY9mnA6Is0`H(a=~biQhudwAoDS|eE29%yYvq`KBfE1s&Ab!Hib z0_9c9mC?SetGkfVK^V*HmX$X}D;lefj>QwT;&FkEMyIkKpf_rTc};L(Y>;~vZ1QFvZ+gTJriAw9%UMe zM~t3j9dN9zh%K+rGSW91y~0NCQ;j}hqi;q?D^Amzjef}*t72;#QLukm00owhHU^x2 z+Ns9Cu(3}@C@7U3Y3y4vprmBh9{eXF)GlTWiku$pQC8iO)!3(H!k-;l4L!4~?DzwRkaifWsE#wr2HqJ z{&S1A(HLj?k6ONLc|97_vTbwwB}PGXoY#+zv^wA@cEI?sF##PAx~AEfn6y9oeiH3J zIc!YXyZsAk|JGhT&(Nx-(#q4q#`Ksm!`sy}tzVkOUkZhdqU0|Xr+T`Czcf2+%-Q=d z&6Qu;lm3|&5A&Dig^l?!qtx4<3qlE3&*T^jJxH!m=0Wm|MII#ISR6`3M;B+J`;4PK zy;reBx-YMEO>JSxN|m{Ilyu)xNB135+`)VPmrMUS3K_?G`me(3zevlSoBJGNR7NA- 
z9;>qKSWP?DgpFmijLw^jVT)nQ}J-hXec{N866gLYh0a7@9@^9y!vE!cTO!Oq(Y zcHWOuwspV$FKO|0Y{YS4oDtoJC&h6CpVBhVKqg4MUua<_$z+hfKZ zUQfQ$`l-A4Q>|g+ZtJJ+NxAa9{HeCEao^s5>VEmDn+|A8iyIH{w;l`|55qXFTdb@{Jv#i>yA|>F%?L@tAa4WnOK4dHupg#cPYD&mMR5*%QUZ z-ZAJ&=`=?n<0((4J~7+-jhT;od*l4pG7 zLGq2SQyut?y8|nXZ>0nCr%$U1EvRmoUT0oJzH@Zo_aU=gsgv#p=|4vq<3~^b{bcpu z|ICwacl3YW{`=W7>Mu0vuVLdio(Y%Yr29S9fq&4de};{}_HNa`r30@T`Aslx{6j zNuIB&4>;}~SA79VpHItPRK9F_)3UK^@_5p!epJ}8gX+)fA9WA6Y5+M5ARL9&K!S`i z%t5yof(W$_Fx&*f4K}KMTaKMRpu->_`o1lbiu%+HM$+nwAxxt$_QNr(_J`!T^?9=z zN`-aruMR+tU2&k$h6!zNT`?RQIn}ZPiLCx<$8O5Fvh~#594$Tq3U8EIC5;;5kea!G;iAwV8m1wRN^Kx@;3$I zqmg5a#z?WTkTI1_hP22E1hQ2QFrS;``oUb4=Lhptz8}n26k7rrQ%lM4c*rrpd>&Hn2Xoc2elSl}_`!S?aeGK5I35qF0wf-i zHDh`9^x}q?(wk%;siq?3AvLUiUyp|@BZnU1D6FCcJsz?gnB^h5c*qJMx=s(NtVPo5 ziaMsz74{MEOC35VERZ?m-WK69g z!{Zxk$xHgiI;PQ}<8TbC;~|p`T2F=T8z&&g2E~PTqR_kz65lupDrwOVg->bmlaXVK zPLX08AY-bT43BSY1m^RNQ~hABI?WH}snh*nzB-o+dBpMb9veray~gSUm@6HT`*NY&UuVIX3-8 z;k_h0Pt&{aVcT~>C2pk6ZBC27j2xTviWGYlGNxW5!`0>4>UCh=X9emFKbWiD^n-co zEkBsA-WE4n6dfJ9bLD#V4k#H>Gj0iIuBlsAUDK3VJXS6}??M;-K2#kwLllQvy+@T5 zm#Fud+pWvNibT{01YyDtaSW@EAbBFb)vP|&@aQ}G7auJ5lKaY>))N=JuZ2Eg0T|^| zDexI&OnnX+(KTw4J@WJu4*u^@xM@! zIz4}7P3FU})am&fIXpcbh1Kr_Q>Q0mg+BoC?7T$_Zd8BD>G>BR{ZF2rRevLCS@IvI zVab1S468km^zplKdbY!(_tfdBAx9q%;22gxNdNs7T|5mMaq+oJ3ex(uM~=qtAf-A& z##E=4YmV(x)tQ=PZ+2lCZRjejZje?Rx>IAHXFZT(8+r;aU3i}JOir_2&`28wR(zip z?~NSW&__!3g^a0wWOz=q{=j_hFu)Jys)2qmPwnFe^VPlq#*TJSFbJFsy)TR`9x$hF zSxsqvL*<(6Nph+UhFb3rZRk;b07>j=2T2VfVIOF$k#|1=g8=A~8jO@>)cv8NQTH2) zL|7dFNvocUo;{GIhJnkI-$5I0~ygg6`8UACP_8jcch` zJ)lDYAbniRM`!h^Iuc2%caCBjrW}uBSWSSWDTknQCQ@Tx6(=D_m!B-WDZ<;E%NIf; zE`LPheSv;ck)!daNvY|OF*SpyU3?}r$yrbC1AFolqG<~QzqrjpSiGj)vN`DrE+>M zr6T1i$FTYtQhmx(%E_UpI0~y{38p+np5{~lv(Dm(cuFM@Jwi{3Rv~HiQZ>`)r5YT= zY8fPZX=)YSkPQ;qbWDubG&|0t%>vL<20Agz|Iq{jB2RmibrtA)2lczbKvT4=BIP6-ku5&J^R!-_N)o-qQCgiw(Dstv zTsWXkXK{1xSDhhVawZV{40_43vyim<;%uhT7w6y@R_8*pFLE)SoJWoACFdi@zStzZ z7UAu!FD`&ayyT}q%cLO#6K4$92Dh$n-X7?8AvElli=>7GWK3NwzHtdPN#D4XX|(4u zVOli}*eY;^-L z&s8Ny-RK8%)lGgdPu=VX^VKcxOZpaYJif6Nkod-^F;y#a>*}iuOJxc`-AYBuH*RC~ z|4Nfn1IX><&@~)|)g1(F*NDmhawjlr0J%$CqZNpL7hR+JZX~U)xQA(U#l1L&RU0I_ zf|2@tRM?(zKXUAe2ZZ*Z(Dv374?!cIu?ElicDEFrb9%pr0kJzCk)qomV`@8HqViE{ zk}k1>X|!XfupWa#*XIkw|Tp*u*QiK& z#Oufw5Aco_Z;(WnaFkYW60}{y94+2radVbWy)7>B4iNoqx%+q*NvkW~V;WuYK8|7a z0VL1P8eCI8q{jA%kC0Dzv@zOgCs`#Q2bnJfP*LgY#NG zy!xhicjVYdJ#Y-Go{%w>4r%sL>XA$@@{(TDn`tzy4~}8g7c$wjepJ{_(;qoDZGg}Q z3eDRzd9b$+RBT&IpKEu=_eF*c8YHC#L&nq)5?mdet@Z;JDt3W6YJWeNtA_f)JavE{ z%vT2n87WT31DP-IKxP;y9-kQwNPH%{HmgD}(v6Ba4RXgZf{K*S9K_l`md2&7DhHE8 zmvIzU83gSiBwEo6Hynomqsx4VNybA>mpKfOeki#fml+An>X%HW(JzPN7*vR@u( zR->q}T_y`T_RDCYjS-r+U-%SZEHvUW+qBlg>vm}IYyj+<94VL!8B=*A$sdsNm3;D& zzA}z!w5b5cusRYl*`}kYuzh7ba%>Z$mav*AG;f>4S0+IvBh2|Xe4xcABgYm^kz$3A zF*TJ8mtSP7X~06~xWF7WouFr>lbS(rg^QV|W)hs~0`t`@ah8$R2onM)L$@C+WoEA~ zYG_;?nOxR{u> zBRJWzbtO%2Ju59Kou0d5^{mXCEc01H6{vVtx-9vu)S*_@RN2o;Yna=u%fWtDx{M$! 
zj97baq_B9|jD@RX<>mxQ8rZt*Gk&I~+dGRpSgJi4_Fm#JcLxH`qC#C#&Yiy=-ve_Fyx^@sUal8_Zbp&GPc{$4`Et)*JxPG zXF_6KG$h`Q2G28QsYXTyQMiuDw_oBBQmsj4guLMO;XO?(sH4qRq}8Qt9(dWC4CFbGLEIJ?b3RE9PF7^ zR6sV&a-^`2f{dy0knZi$CXj_XCkks4BqX+>>TiHIB+{Y#ckreBZwaY+b$^IYQIz zFGV_7s+f$^QQmUy5&~xe&?R*iQkDbGhKgpHgCoLdNSLKp@e0@_%u+4jiU)oZoEgy-U)ZIPh?^lJ`X!D!7m|fK82}<~hNP)kFJFOZ|B?UE}CX~}5ts2ju<~};=^}h$Zrp3=>9+jOX z%(Ede9}O9aKH_mm%tr(FfS8Z=gP4!@gLrROK+B#j$RUui@8PQOg^K!>lS`_qO-H=| zdh;Icrqn&0gQPAb!Mcaz(0dU9xrdXK^Byh%742~`j+l>zq&>3v@t8|lDtQxkW79vu zV9{kH!z7mr`3gt`%aHE7x2woPl~)Vv8pvdo>RP0&+qZkRr7aJ}H!m~>AOBQ5>$4)aR-?oBn zUgAznZ3yZhsar{~E^&O=aT@`-#7W9|iMt&t8s!cgF{2C#qlEh1g9&98N?zg0802Uz z*BpFE%Zb-@YH9xU$hM{J(@L~ZD~rQUcT4emAQ4qVy03X{WZ_5d6W0BZ{1N=d14!DJ zyk9${^$f%xBo{S4B;1D~F~bb`AD6ssQ29K3yC1|1vme9^vw)W6cbb;BpByiau3VG5 zU~Oh}hB;_F2DbU+IGkE>;UKBUNwA(A)ACOckSE8Iay~hJ5-J+yDI77u3`uwF9mF8= z3`<#0j!(do#6B$({_Mi1iEVhAxHM4oEX%@3&q>+mArVGHx}O`rNEUwKC1LG?q4KOQ#!h?*hY#4CxLF2ats-2MTK+NE#Y7 z?u(=y6>c4ph6%tybG08pjGZ8xpS) zLna-1zU%zQ+r&_X=D0w-O-yi#3&h*R1oK@W-X$hV1+!SfD#C;O(!vsPr+ zE*~{J+Z?BlgkG2Z>o3*q9qSz=brcEK9`*fkeH>3fHmH}BbAS2?P|+q6am1cjkhIB> zSoS)Zg}4ztZi*ALho@}~O(7RHDHQHhNW{sIn16BZVMP{doFS~4kWwSIutHLAXU^*+ zrj1CuDjp&g6&4A#7!o^JL1Ov^wB&29*5wR4SV0xabeRr2SP>lV0h)M_U{uRZ4>Mqb&;v$VXcw<@{*N zLa1mG20;jsA!(BNF!Ev+N`A8?al=7rZ-kB}7dBZU+@+9+kRjclX(=ZQH6AOh3P`E3 zSw)bvzS5F->fp3@;+3SL!YZLwL&j7MWb!L5lU=66Cy1c(?C^w75D`4o#l$Cw2o83E z_yiHbUe?gOLPm_J860M=YYfdOQ6)|F=9fYE?&-1c$u6t7mSrE(bd} zY#;~=G~$Tw4MD;JaxIc%&mLOFHV(9Ah!1Vsd`Q}ez_cbZV3L@WT?vVJ8PX9c zt|kT5uMx^xNUQpF)JUY*Yh>C>f%tLEqq5_LxgHXq9)jFkq!+OMEgaEjx53i@-2vNs+T55kf<{7sj4T z7AihZSm#3~E8c{py)L#gEA1IA-a;;Fyg;}YLSh0K@;}zaCZO_J`(i(c4-WZ3d~irW z%i5Q9^{V_=FiA)oLy|b0eJ-#7{Zb(qORI})OEnD#~ar(4O`xT zV_4k?Nn0*#RyR>$FSEEAIWASWMQB@u=2@z+hs!LsLL#a&FJfvq@()!mTE2Hr!3y_Vu$= 0.9) + assert item['valid'][0.9] / item['total_frames'] >= 0.2 + + dataset = PoseDataset( + ann_file=ann_file, + pipeline=[], + split='train', + valid_ratio=0.3, + box_thr=0.7) + assert len(dataset) == 87 + for item in dataset: + assert np.all(item['box_score'][item['anno_inds']] >= 0.7) + assert item['valid'][0.7] / item['total_frames'] >= 0.3 + + with pytest.raises(AssertionError): + dataset = PoseDataset( + ann_file=ann_file, pipeline=[], valid_ratio=0.2, box_thr=0.55) diff --git a/tests/datasets/transforms/test_pose_transforms.py b/tests/datasets/transforms/test_pose_transforms.py index 913447f938..7383a5380c 100644 --- a/tests/datasets/transforms/test_pose_transforms.py +++ b/tests/datasets/transforms/test_pose_transforms.py @@ -2,17 +2,15 @@ import copy import copy as cp import os.path as osp -import tempfile from collections import defaultdict import numpy as np import pytest -from mmengine import dump from mmengine.testing import assert_dict_has_keys from numpy.testing import assert_array_almost_equal, assert_array_equal -from mmaction.datasets.transforms import (GeneratePoseTarget, GenSkeFeat, - JointToBone, LoadKineticsPose, +from mmaction.datasets.transforms import (DecompressPose, GeneratePoseTarget, + GenSkeFeat, JointToBone, MergeSkeFeat, MMCompact, MMDecode, MMUniformSampleFrames, PadTo, PoseCompact, PoseDecode, @@ -23,7 +21,7 @@ class TestPoseTransforms: @staticmethod - def test_load_kinetics_pose(): + def test_decompress_pose(): def get_mode(arr): cnt = defaultdict(lambda: 0) @@ -32,86 +30,68 @@ def get_mode(arr): max_val = max(cnt.values()) return [k for k in cnt if cnt[k] == max_val], max_val - with tempfile.TemporaryDirectory() as tmpdir: - filename = osp.join(tmpdir, 'tmp.pkl') - total_frames = 100 - img_shape = (224, 224) - 
frame_inds = np.random.choice(range(100), size=120) - frame_inds.sort() - anno_flag = np.random.random(120) > 0.1 - anno_inds = np.array([i for i, f in enumerate(anno_flag) if f]) - kp = np.random.random([120, 17, 3]) - dump(kp, filename) - results = dict( - filename=filename, - total_frames=total_frames, - img_shape=img_shape, - frame_inds=frame_inds) - - inp = cp.deepcopy(results) - - with pytest.raises(NotImplementedError): - LoadKineticsPose(squeeze=True, max_person=100, source='xxx') - - load_kinetics_pose = LoadKineticsPose( - squeeze=True, max_person=100, source='openpose-18') - - assert str(load_kinetics_pose) == ( - 'LoadKineticsPose(io_backend=disk, ' - 'squeeze=True, max_person=100, ' - "keypoint_weight={'face': 1, " - "'torso': 2, 'limb': 3}, " - 'source=openpose-18, kwargs={})') - return_results = load_kinetics_pose(inp) - assert return_results['keypoint'].shape[:-1] == \ - return_results['keypoint_score'].shape - - num_person = return_results['keypoint'].shape[0] - num_frame = return_results['keypoint'].shape[1] - assert num_person == get_mode(frame_inds)[1] - assert np.max(return_results['keypoint']) > 1 - assert num_frame == len(set(frame_inds)) - - inp = cp.deepcopy(results) - load_kinetics_pose = LoadKineticsPose( - squeeze=False, max_person=100, source='openpose-18') - return_results = load_kinetics_pose(inp) - assert return_results['keypoint'].shape[:-1] == \ - return_results['keypoint_score'].shape - - num_person = return_results['keypoint'].shape[0] - num_frame = return_results['keypoint'].shape[1] - assert num_person == get_mode(frame_inds)[1] - assert np.max(return_results['keypoint']) > 1 - assert num_frame == total_frames - - inp = cp.deepcopy(results) - inp['anno_inds'] = anno_inds - load_kinetics_pose = LoadKineticsPose( - squeeze=True, max_person=100, source='mmpose') - return_results = load_kinetics_pose(inp) - assert return_results['keypoint'].shape[:-1] == \ - return_results['keypoint_score'].shape - - num_person = return_results['keypoint'].shape[0] - num_frame = return_results['keypoint'].shape[1] - assert num_person == get_mode(frame_inds[anno_inds])[1] - assert np.max(return_results['keypoint']) <= 1 - assert num_frame == len(set(frame_inds[anno_inds])) - - inp = cp.deepcopy(results) - inp['anno_inds'] = anno_inds - load_kinetics_pose = LoadKineticsPose( - squeeze=True, max_person=2, source='mmpose') - return_results = load_kinetics_pose(inp) - assert return_results['keypoint'].shape[:-1] == \ - return_results['keypoint_score'].shape - - num_person = return_results['keypoint'].shape[0] - num_frame = return_results['keypoint'].shape[1] - assert num_person <= 2 - assert np.max(return_results['keypoint']) <= 1 - assert num_frame == len(set(frame_inds[anno_inds])) + total_frames = 100 + img_shape = (224, 224) + frame_inds = np.random.choice(range(100), size=120) + frame_inds.sort() + anno_flag = np.random.random(120) > 0.1 + anno_inds = np.array([i for i, f in enumerate(anno_flag) if f]) + kp = np.random.random([120, 17, 3]) + results = dict( + frame_inds=frame_inds, + keypoint=kp, + total_frames=total_frames, + img_shape=img_shape) + + inp = cp.deepcopy(results) + + decompress_pose = DecompressPose(squeeze=True, max_person=100) + + assert str(decompress_pose) == ( + 'DecompressPose(squeeze=True, max_person=100)') + return_results = decompress_pose(inp) + assert return_results['keypoint'].shape[:-1] == \ + return_results['keypoint_score'].shape + + num_person = return_results['keypoint'].shape[0] + num_frame = return_results['keypoint'].shape[1] + assert 
num_person == get_mode(frame_inds)[1] + assert num_frame == len(set(frame_inds)) + + inp = cp.deepcopy(results) + decompress_pose = DecompressPose(squeeze=False, max_person=100) + return_results = decompress_pose(inp) + assert return_results['keypoint'].shape[:-1] == \ + return_results['keypoint_score'].shape + + num_person = return_results['keypoint'].shape[0] + num_frame = return_results['keypoint'].shape[1] + assert num_person == get_mode(frame_inds)[1] + assert num_frame == total_frames + + inp = cp.deepcopy(results) + inp['anno_inds'] = anno_inds + decompress_pose = DecompressPose(squeeze=True, max_person=100) + return_results = decompress_pose(inp) + assert return_results['keypoint'].shape[:-1] == \ + return_results['keypoint_score'].shape + + num_person = return_results['keypoint'].shape[0] + num_frame = return_results['keypoint'].shape[1] + assert num_person == get_mode(frame_inds[anno_inds])[1] + assert num_frame == len(set(frame_inds[anno_inds])) + + inp = cp.deepcopy(results) + inp['anno_inds'] = anno_inds + decompress_pose = DecompressPose(squeeze=True, max_person=2) + return_results = decompress_pose(inp) + assert return_results['keypoint'].shape[:-1] == \ + return_results['keypoint_score'].shape + + num_person = return_results['keypoint'].shape[0] + num_frame = return_results['keypoint'].shape[1] + assert num_person <= 2 + assert num_frame == len(set(frame_inds[anno_inds])) @staticmethod def test_generate_pose_target(): diff --git a/tools/data/skeleton/README.md b/tools/data/skeleton/README.md index 2f55a2021e..b836db17da 100644 --- a/tools/data/skeleton/README.md +++ b/tools/data/skeleton/README.md @@ -32,7 +32,7 @@ We provide links to the pre-processed skeleton annotations, you can directly dow - Diving48 \[2D Skeleton\]: https://download.openmmlab.com/mmaction/v1.0/skeleton/data/diving48_2d.pkl - Kinetics400 \[2D Skeleton\]: https://download.openmmlab.com/mmaction/v1.0/skeleton/data/k400_2d.pkl (Table of contents only, no skeleton annotations) -For Kinetics400, since the skeleton annotations are large, we do not provide the direct download links on aliyun. Please use the following link to download the `kpfiles` and extract it under `$MMACTION2/data/k400` for Kinetics400 training & testing: https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EeyDCVskqLtClMVVwqD53acBF2FEwkctp3vtRbkLfnKSTw?e=B3SZlM +For Kinetics400, since the skeleton annotations are large, we do not provide the direct download links on aliyun. 
Please use the following link to download the `k400_kpfiles_2d.zip` and extract it under `$MMACTION2/data/skeleton/kpfiles` for Kinetics400 training & testing: https://openxlab.org.cn/datasets/OpenMMLab/Kinetics400-skeleton If you want to generate 2D skeleton annotations of specified video, please install mmdetection and mmpose first, then use the following script to extract skeleton annotations of NTURGB+D video: diff --git a/tools/data/skeleton/README_zh-CN.md b/tools/data/skeleton/README_zh-CN.md index 2cd354a1d5..c2e01a6311 100644 --- a/tools/data/skeleton/README_zh-CN.md +++ b/tools/data/skeleton/README_zh-CN.md @@ -44,7 +44,7 @@ bash download_annotations.sh ${DATASET} - Diving48 \[2D Skeleton\]: https://download.openmmlab.com/mmaction/v1.0/skeleton/data/diving48_2d.pkl - Kinetics400 \[2D Skeleton\]: https://download.openmmlab.com/mmaction/v1.0/skeleton/data/k400_2d.pkl (ๅชๅŒ…ๅซๆ•ฐๆฎๅˆ—่กจ๏ผŒๆฒกๆœ‰ๅงฟๆ€ๆ ‡ๆณจๆ–‡ไปถ) -็”ฑไบŽ Kinetics400 ๆ•ฐๆฎ้›†ๅงฟๆ€ๆ ‡ๆณจๆ–‡ไปถ่ฟ‡ๅคง๏ผŒๆˆ‘ไปฌไธๆไพ›้˜ฟ้‡Œไบ‘็š„ไธ‹่ฝฝ้“พๆŽฅ๏ผŒ่ฏทไฝฟ็”จๆญค[้“พๆŽฅ](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EeyDCVskqLtClMVVwqD53acBF2FEwkctp3vtRbkLfnKSTw?e=B3SZlM)ไธ‹่ฝฝ `kpfiles`๏ผŒ่งฃๅŽ‹ๅˆฐ `$MMACTION2/data/k400` ็›ฎๅฝ•ไธ‹๏ผŒ็”จไบŽ Kinetics400 ็š„่ฎญ็ปƒๅ’Œๆต‹่ฏ•ใ€‚ +็”ฑไบŽ Kinetics400 ๆ•ฐๆฎ้›†ๅงฟๆ€ๆ ‡ๆณจๆ–‡ไปถ่ฟ‡ๅคง๏ผŒๆˆ‘ไปฌไธๆไพ›้˜ฟ้‡Œไบ‘็š„ไธ‹่ฝฝ้“พๆŽฅ๏ผŒ่ฏทไฝฟ็”จๆญค[้“พๆŽฅ](https://openxlab.org.cn/datasets/OpenMMLab/Kinetics400-skeleton)ไธ‹่ฝฝ `k400_kpfiles_2d.zip`๏ผŒ่งฃๅŽ‹ๅˆฐ `$MMACTION2/data/skeleton/kpfiles` ็›ฎๅฝ•ไธ‹๏ผŒ็”จไบŽ Kinetics400 ็š„่ฎญ็ปƒๅ’Œๆต‹่ฏ•ใ€‚ ่‹ฅๆƒณ็”Ÿๆˆๅ•ไธช่ง†้ข‘็š„ 2D ๅงฟๆ€ๆ ‡ๆณจๆ–‡ไปถ๏ผŒ็”จๆˆทๅœจๅฎ‰่ฃ… mmdetection ๅ’Œ mmpose ไน‹ๅŽ๏ผŒๅฏไฝฟ็”จไปฅไธ‹่„šๆœฌ่ฟ›่กŒ NTURGB+D ่ง†้ข‘็š„ๅงฟๆ€ๆๅ–๏ผš From 8ff889af65fd775d28f95b867fc23e7f95a10854 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Wed, 6 Sep 2023 17:58:04 +0800 Subject: [PATCH 13/24] [Fix] Fix multisports dataset detection (#2584) --- ...ned-r50_8xb8-8x8x1-cosine-10e_ava21-rgb.py | 4 +- ...ned-r50_8xb8-8x8x1-cosine-10e_ava22-rgb.py | 1 + .../lfb/slowonly-lfb-infer_r50_ava21-rgb.py | 1 + ...etrained-r50_8xb12-4x16x1-20e_ava21-rgb.py | 1 + ...a-pretrained-r50_infer-4x16x1_ava21-rgb.py | 1 + ...etrained-r50_8xb16-4x16x1-20e_ava21-rgb.py | 1 + ...ned-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py | 1 + ...pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py | 1 + ...re-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py | 1 + ...pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py | 1 + ...etrained-r101_8xb16-8x8x1-20e_ava21-rgb.py | 1 + ...ained-r50-nl_8xb16-4x16x1-20e_ava21-rgb.py | 1 + ...rained-r50-nl_8xb16-8x8x1-20e_ava21-rgb.py | 1 + ...etrained-r50_8xb16-4x16x1-20e_ava21-rgb.py | 1 + ...ned-r50_8xb16-4x16x1-8e_multisports-rgb.py | 1 + ...etrained-r50_8xb16-4x16x1-20e_ava21-rgb.py | 1 + ..._8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py | 1 + ..._8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py | 1 + mmaction/datasets/ava_dataset.py | 1 - .../functional/multisports_utils.py | 25 +- mmaction/models/backbones/vit_mae.py | 10 - mmaction/models/roi_heads/__init__.py | 31 +- .../models/roi_heads/bbox_heads/bbox_head.py | 35 +- .../roi_extractors/single_straight3d.py | 10 - mmaction/models/roi_heads/roi_head.py | 405 +++++++++--------- .../roi_heads/shared_heads/acrn_head.py | 13 - .../models/roi_heads/shared_heads/fbo_head.py | 12 - .../roi_heads/shared_heads/lfb_infer_head.py | 13 - mmaction/models/task_modules/__init__.py | 12 +- .../assigners/max_iou_assigner_ava.py | 223 +++++----- 
tests/models/roi_heads/test_bbox_heads.py | 17 +- .../models/similarity/test_clip_similarity.py | 3 + 32 files changed, 387 insertions(+), 444 deletions(-) diff --git a/configs/detection/acrn/slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava21-rgb.py b/configs/detection/acrn/slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava21-rgb.py index 10928a96ee..0b183ae812 100644 --- a/configs/detection/acrn/slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava21-rgb.py +++ b/configs/detection/acrn/slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava21-rgb.py @@ -46,6 +46,7 @@ shared_head=dict(type='ACRNHead', in_channels=4608, out_channels=2304), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2304, num_classes=81, multilabel=True, @@ -88,9 +89,6 @@ proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl' file_client_args = dict(io_backend='disk') -file_client_args = dict( - io_backend='petrel', - path_mapping=dict({'data/ava': 's254:s3://openmmlab/datasets/action/ava'})) train_pipeline = [ dict(type='SampleAVAFrames', clip_len=32, frame_interval=2), dict(type='RawFrameDecode', **file_client_args), diff --git a/configs/detection/acrn/slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava22-rgb.py b/configs/detection/acrn/slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava22-rgb.py index 4537d25cc7..3357d9c3ca 100644 --- a/configs/detection/acrn/slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava22-rgb.py +++ b/configs/detection/acrn/slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava22-rgb.py @@ -46,6 +46,7 @@ shared_head=dict(type='ACRNHead', in_channels=4608, out_channels=2304), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2304, num_classes=81, multilabel=True, diff --git a/configs/detection/lfb/slowonly-lfb-infer_r50_ava21-rgb.py b/configs/detection/lfb/slowonly-lfb-infer_r50_ava21-rgb.py index 278d87c1e1..6c1c60d7e4 100644 --- a/configs/detection/lfb/slowonly-lfb-infer_r50_ava21-rgb.py +++ b/configs/detection/lfb/slowonly-lfb-infer_r50_ava21-rgb.py @@ -34,6 +34,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2048, num_classes=81, multilabel=True, diff --git a/configs/detection/lfb/slowonly-lfb-nl_kinetics400-pretrained-r50_8xb12-4x16x1-20e_ava21-rgb.py b/configs/detection/lfb/slowonly-lfb-nl_kinetics400-pretrained-r50_8xb12-4x16x1-20e_ava21-rgb.py index 9d323ad0e4..5b1a837864 100644 --- a/configs/detection/lfb/slowonly-lfb-nl_kinetics400-pretrained-r50_8xb12-4x16x1-20e_ava21-rgb.py +++ b/configs/detection/lfb/slowonly-lfb-nl_kinetics400-pretrained-r50_8xb12-4x16x1-20e_ava21-rgb.py @@ -37,6 +37,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2560, num_classes=81, multilabel=True, diff --git a/configs/detection/lfb/slowonly-lfb_ava-pretrained-r50_infer-4x16x1_ava21-rgb.py b/configs/detection/lfb/slowonly-lfb_ava-pretrained-r50_infer-4x16x1_ava21-rgb.py index 2ba637545c..377da48f07 100644 --- a/configs/detection/lfb/slowonly-lfb_ava-pretrained-r50_infer-4x16x1_ava21-rgb.py +++ b/configs/detection/lfb/slowonly-lfb_ava-pretrained-r50_infer-4x16x1_ava21-rgb.py @@ -34,6 +34,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2048, num_classes=81, multilabel=True, diff --git 
a/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py b/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py index 0eb0e501e3..89cc9078ef 100644 --- a/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py +++ b/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py @@ -44,6 +44,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2304, num_classes=81, multilabel=True, diff --git a/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py b/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py index debeb5c7fd..a34af4fb62 100644 --- a/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py +++ b/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py @@ -45,6 +45,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2304, num_classes=81, multilabel=True, diff --git a/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py b/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py index 1e94a10960..00f3e491a8 100644 --- a/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py +++ b/configs/detection/slowfast/slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py @@ -45,6 +45,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2304, num_classes=81, multilabel=True, diff --git a/configs/detection/slowonly/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py b/configs/detection/slowonly/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py index fd44f336ac..d35cf5331a 100644 --- a/configs/detection/slowonly/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py +++ b/configs/detection/slowonly/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py @@ -29,6 +29,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2048, num_classes=81, multilabel=True, diff --git a/configs/detection/slowonly/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py b/configs/detection/slowonly/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py index 4af750e8ad..1e59cd3494 100644 --- a/configs/detection/slowonly/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py +++ b/configs/detection/slowonly/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py @@ -29,6 +29,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2048, num_classes=81, multilabel=True, diff --git a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r101_8xb16-8x8x1-20e_ava21-rgb.py b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r101_8xb16-8x8x1-20e_ava21-rgb.py index 9bee13a25c..fc83f9e34b 100644 --- a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r101_8xb16-8x8x1-20e_ava21-rgb.py +++ b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r101_8xb16-8x8x1-20e_ava21-rgb.py @@ -28,6 +28,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2048, num_classes=81, multilabel=True, diff --git 
a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50-nl_8xb16-4x16x1-20e_ava21-rgb.py b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50-nl_8xb16-4x16x1-20e_ava21-rgb.py index cdc8ea8d98..38b1e7605e 100644 --- a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50-nl_8xb16-4x16x1-20e_ava21-rgb.py +++ b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50-nl_8xb16-4x16x1-20e_ava21-rgb.py @@ -36,6 +36,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2048, num_classes=81, multilabel=True, diff --git a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50-nl_8xb16-8x8x1-20e_ava21-rgb.py b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50-nl_8xb16-8x8x1-20e_ava21-rgb.py index 9b6dd00fdb..ee6335ecac 100644 --- a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50-nl_8xb16-8x8x1-20e_ava21-rgb.py +++ b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50-nl_8xb16-8x8x1-20e_ava21-rgb.py @@ -36,6 +36,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2048, num_classes=81, multilabel=True, diff --git a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py index a83408c84a..ddb5f34cb4 100644 --- a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py +++ b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py @@ -29,6 +29,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2048, num_classes=81, multilabel=True, diff --git a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-8e_multisports-rgb.py b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-8e_multisports-rgb.py index 0d83ca0d48..8b1887eac1 100644 --- a/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-8e_multisports-rgb.py +++ b/configs/detection/slowonly/slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-8e_multisports-rgb.py @@ -30,6 +30,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=False, in_channels=2048, num_classes=num_classes, multilabel=False, diff --git a/configs/detection/slowonly/slowonly_kinetics700-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py b/configs/detection/slowonly/slowonly_kinetics700-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py index a68893a015..3df1b248f7 100644 --- a/configs/detection/slowonly/slowonly_kinetics700-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py +++ b/configs/detection/slowonly/slowonly_kinetics700-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py @@ -29,6 +29,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=2048, num_classes=81, multilabel=True, diff --git a/configs/detection/videomae/vit-base-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py b/configs/detection/videomae/vit-base-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py index 6e5950b847..8ba9c7a22a 100644 --- a/configs/detection/videomae/vit-base-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py +++ b/configs/detection/videomae/vit-base-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py @@ -31,6 +31,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + 
background_class=True, in_channels=768, num_classes=81, multilabel=True, diff --git a/configs/detection/videomae/vit-large-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py b/configs/detection/videomae/vit-large-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py index 229f3ae013..a97eebf898 100644 --- a/configs/detection/videomae/vit-large-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py +++ b/configs/detection/videomae/vit-large-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb.py @@ -32,6 +32,7 @@ with_temporal_pool=True), bbox_head=dict( type='BBoxHeadAVA', + background_class=True, in_channels=1024, num_classes=81, multilabel=True, diff --git a/mmaction/datasets/ava_dataset.py b/mmaction/datasets/ava_dataset.py index e744dc9f5e..c1ba4a6b41 100644 --- a/mmaction/datasets/ava_dataset.py +++ b/mmaction/datasets/ava_dataset.py @@ -203,7 +203,6 @@ def parse_img_record(self, img_records: List[dict]) -> tuple: labels.append(label) entity_ids.append(img_record['entity_id']) - bboxes = np.stack(bboxes) labels = np.stack(labels) entity_ids = np.stack(entity_ids) diff --git a/mmaction/evaluation/functional/multisports_utils.py b/mmaction/evaluation/functional/multisports_utils.py index 516828c701..72643d977f 100644 --- a/mmaction/evaluation/functional/multisports_utils.py +++ b/mmaction/evaluation/functional/multisports_utils.py @@ -7,6 +7,7 @@ from collections import defaultdict import numpy as np +from mmengine.logging import MMLogger from rich.progress import track @@ -314,7 +315,7 @@ def tubescore(tt): def frameAP(GT, alldets, thr, print_info=True): - + logger = MMLogger.get_current_instance() vlist = GT['test_videos'][0] results = {} @@ -326,7 +327,7 @@ def frameAP(GT, alldets, thr, print_info=True): 'basketball save', 'basketball jump ball' ]: if print_info: - print('do not evaluate {}'.format(label)) + logger.info('do not evaluate {}'.format(label)) continue # det format: # noqa: E501 detections = alldets[alldets[:, 2] == ilabel, :] @@ -355,7 +356,7 @@ def frameAP(GT, alldets, thr, print_info=True): gt_num = sum([g.shape[0] for g in gt.values()]) if gt_num == 0: if print_info: - print('no such label', ilabel, label) + logger.info('no such label', ilabel, label) continue fp = 0 # false positives tp = 0 # true positives @@ -395,15 +396,15 @@ def frameAP(GT, alldets, thr, print_info=True): class_result[label] = pr_to_ap_voc(results[label]) * 100 frameap_result = np.mean(ap) if print_info: - print('frameAP_{}\n'.format(thr)) + logger.info('frameAP_{}\n'.format(thr)) for label in class_result: - print('{:20s} {:8.2f}'.format(label, class_result[label])) - print('{:20s} {:8.2f}'.format('mAP', frameap_result)) + logger.info('{:20s} {:8.2f}'.format(label, class_result[label])) + logger.info('{:20s} {:8.2f}'.format('mAP', frameap_result)) return frameap_result def videoAP(GT, alldets, thr, print_info=True): - + logger = MMLogger.get_current_instance() vlist = GT['test_videos'][0] res = {} @@ -414,7 +415,7 @@ def videoAP(GT, alldets, thr, print_info=True): 'basketball save', 'basketball jump ball' ]: if print_info: - print('do not evaluate{}'.format(GT['labels'][ilabel])) + logger.info('do not evaluate{}'.format(GT['labels'][ilabel])) continue detections = alldets[ilabel] # load ground-truth @@ -438,7 +439,7 @@ def videoAP(GT, alldets, thr, print_info=True): tp = 0 # true positives if gt_num == 0: if print_info: - print('no such label', ilabel, GT['labels'][ilabel]) + logger.info('no such label', ilabel, GT['labels'][ilabel]) continue 
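The `print`-to-`MMLogger` change above routes frameAP/videoAP output through the experiment logger retrieved via `MMLogger.get_current_instance()`. A minimal sketch of setting such a logger up in a standalone script, assuming only mmengine is available (the instance name `mmaction` is an arbitrary choice for illustration):

```python
from mmengine.logging import MMLogger

# Inside a full mmaction2 run the runner creates this instance; created here,
# it is what MMLogger.get_current_instance() returns inside frameAP/videoAP.
logger = MMLogger.get_instance('mmaction', log_level='INFO')
logger.info('frameAP_{}'.format(0.5))
```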
is_gt_box_detected = {} for i, j in enumerate( @@ -471,10 +472,10 @@ def videoAP(GT, alldets, thr, print_info=True): for label in res: class_result[label] = pr_to_ap_voc(res[label]) * 100 if print_info: - print('VideoAP_{}\n'.format(thr)) + logger.info('VideoAP_{}\n'.format(thr)) for label in class_result: - print('{:20s} {:8.2f}'.format(label, class_result[label])) - print('{:20s} {:8.2f}'.format('mAP', videoap_result)) + logger.info('{:20s} {:8.2f}'.format(label, class_result[label])) + logger.info('{:20s} {:8.2f}'.format('mAP', videoap_result)) return videoap_result diff --git a/mmaction/models/backbones/vit_mae.py b/mmaction/models/backbones/vit_mae.py index 31210beba2..e549122fbc 100644 --- a/mmaction/models/backbones/vit_mae.py +++ b/mmaction/models/backbones/vit_mae.py @@ -12,12 +12,6 @@ from mmaction.registry import MODELS from mmaction.utils import ConfigType, OptConfigType -try: - from mmdet.registry import MODELS as MMDET_MODELS - mmdet_imported = True -except (ImportError, ModuleNotFoundError): - mmdet_imported = False - class Attention(BaseModule): """Multi-head Self-attention. @@ -387,7 +381,3 @@ def forward(self, x: Tensor) -> Tensor: return self.fc_norm(x.mean(1)) return x[:, 0] - - -if mmdet_imported: - MMDET_MODELS.register_module()(VisionTransformer) diff --git a/mmaction/models/roi_heads/__init__.py b/mmaction/models/roi_heads/__init__.py index 6ff62a1929..d7d413bb79 100644 --- a/mmaction/models/roi_heads/__init__.py +++ b/mmaction/models/roi_heads/__init__.py @@ -1,10 +1,23 @@ # Copyright (c) OpenMMLab. All rights reserved. -from .bbox_heads import BBoxHeadAVA -from .roi_extractors import SingleRoIExtractor3D -from .roi_head import AVARoIHead -from .shared_heads import ACRNHead, FBOHead, LFBInferHead - -__all__ = [ - 'AVARoIHead', 'BBoxHeadAVA', 'SingleRoIExtractor3D', 'ACRNHead', 'FBOHead', - 'LFBInferHead' -] +try: + from mmdet.registry import MODELS as MMDET_MODELS + + from .bbox_heads import BBoxHeadAVA + from .roi_extractors import SingleRoIExtractor3D + from .roi_head import AVARoIHead + from .shared_heads import ACRNHead, FBOHead, LFBInferHead + + for module in [ + AVARoIHead, BBoxHeadAVA, SingleRoIExtractor3D, ACRNHead, FBOHead, + LFBInferHead + ]: + + MMDET_MODELS.register_module()(module) + + __all__ = [ + 'AVARoIHead', 'BBoxHeadAVA', 'SingleRoIExtractor3D', 'ACRNHead', + 'FBOHead', 'LFBInferHead' + ] + +except (ImportError, ModuleNotFoundError): + pass diff --git a/mmaction/models/roi_heads/bbox_heads/bbox_head.py b/mmaction/models/roi_heads/bbox_heads/bbox_head.py index 3fad373cf2..7faa632b18 100644 --- a/mmaction/models/roi_heads/bbox_heads/bbox_head.py +++ b/mmaction/models/roi_heads/bbox_heads/bbox_head.py @@ -5,25 +5,17 @@ import torch import torch.nn as nn import torch.nn.functional as F +from mmdet.models.task_modules.samplers import SamplingResult from mmengine.config import ConfigDict from mmengine.structures import InstanceData -from torch import Tensor - -from mmaction.structures.bbox import bbox_target -from mmaction.utils import InstanceList - -try: - from mmdet.models.task_modules.samplers import SamplingResult - from mmdet.registry import MODELS as MMDET_MODELS - mmdet_imported = True -except (ImportError, ModuleNotFoundError): - from mmaction.utils import SamplingResult - mmdet_imported = False - # Resolve cross-entropy function to support multi-target in Torch < 1.10 # This is a very basic 'hack', with minimal functionality to support the # procedure under prior torch versions from packaging import version as pv +from torch import Tensor + 
+from mmaction.structures.bbox import bbox_target +from mmaction.utils import InstanceList if pv.parse(torch.__version__) < pv.parse('1.10'): @@ -44,6 +36,8 @@ class BBoxHeadAVA(nn.Module): """Simplest RoI head, with only one fc layer for classification. Args: + background_class (bool): Whether set class 0 as background class and + ignore it when calculate loss. temporal_pool_type (str): The temporal pool type. Choices are ``avg`` or ``max``. Defaults to ``avg``. spatial_pool_type (str): The spatial pool type. Choices are ``avg`` or @@ -70,6 +64,7 @@ class BBoxHeadAVA(nn.Module): def __init__( self, + background_class: bool, temporal_pool_type: str = 'avg', spatial_pool_type: str = 'max', in_channels: int = 2048, @@ -98,6 +93,8 @@ def __init__( self.focal_gamma = focal_gamma self.focal_alpha = focal_alpha + self.background_class = background_class + if topk is None: self.topk = () elif isinstance(topk, int): @@ -251,9 +248,11 @@ def loss_and_target(self, cls_score: Tensor, rois: Tensor, losses = dict() # Only use the cls_score if cls_score is not None: - labels = labels[:, 1:] # Get valid labels (ignore first one) + if self.background_class: + labels = labels[:, 1:] # Get valid labels (ignore first one) + cls_score = cls_score[:, 1:] pos_inds = torch.sum(labels, dim=-1) > 0 - cls_score = cls_score[pos_inds, 1:] + cls_score = cls_score[pos_inds] labels = labels[pos_inds] # Compute First Recall/Precisions @@ -268,7 +267,7 @@ def loss_and_target(self, cls_score: Tensor, rois: Tensor, # If Single-label, need to ensure that target labels sum to 1: ie # that they are valid probabilities. - if not self.multilabel: + if not self.multilabel and self.background_class: labels = labels / labels.sum(dim=1, keepdim=True) # Select Loss function based on single/multi-label @@ -414,7 +413,3 @@ def _bbox_crop_undo(bboxes, crop_quadruple): results.scores = scores return results - - -if mmdet_imported: - MMDET_MODELS.register_module()(BBoxHeadAVA) diff --git a/mmaction/models/roi_heads/roi_extractors/single_straight3d.py b/mmaction/models/roi_heads/roi_extractors/single_straight3d.py index 6a1044cd74..242b1a5d13 100644 --- a/mmaction/models/roi_heads/roi_extractors/single_straight3d.py +++ b/mmaction/models/roi_heads/roi_extractors/single_straight3d.py @@ -6,12 +6,6 @@ import torch.nn.functional as F from torch import Tensor -try: - from mmdet.registry import MODELS as MMDET_MODELS - mmdet_imported = True -except (ImportError, ModuleNotFoundError): - mmdet_imported = False - class SingleRoIExtractor3D(nn.Module): """Extract RoI features from a single level feature map. @@ -130,7 +124,3 @@ def forward(self, feat: Union[Tensor, Tuple[Tensor]], roi_feats = torch.stack(roi_feats, dim=2) return roi_feats, feat - - -if mmdet_imported: - MMDET_MODELS.register_module()(SingleRoIExtractor3D) diff --git a/mmaction/models/roi_heads/roi_head.py b/mmaction/models/roi_heads/roi_head.py index baa1b42e77..f98d5fe39c 100644 --- a/mmaction/models/roi_heads/roi_head.py +++ b/mmaction/models/roi_heads/roi_head.py @@ -1,227 +1,206 @@ # Copyright (c) OpenMMLab. All rights reserved. 
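The new mandatory `background_class` argument on `BBoxHeadAVA` decides whether class 0 is treated as a background column and stripped before the loss is computed, which is what lets the same head serve AVA (80 actions plus background) and MultiSports (no background class). A condensed, self-contained sketch of that selection step; the helper name and tensor sizes below are illustrative only:

```python
import torch

def select_targets(cls_score, labels, background_class=True):
    # Sketch of the selection performed in BBoxHeadAVA.loss_and_target above.
    if background_class:            # class 0 is background: drop its column
        labels = labels[:, 1:]
        cls_score = cls_score[:, 1:]
    pos_inds = torch.sum(labels, dim=-1) > 0   # keep proposals with >=1 label
    return cls_score[pos_inds], labels[pos_inds]

scores = torch.randn(4, 81)                    # AVA: 80 actions + background
labels = torch.zeros(4, 81)
labels[0, 5] = 1.0
labels[2, 7] = 1.0
print(select_targets(scores, labels)[0].shape)                    # (2, 80)
print(select_targets(scores[:, :66], labels[:, :66],
                     background_class=False)[0].shape)            # (2, 66)
```

With `background_class=False`, as in the MultiSports config above, every class column is kept and only proposals without any positive label are dropped.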
from typing import List, Tuple, Union +from mmdet.models.roi_heads import StandardRoIHead +from mmdet.models.task_modules.samplers import SamplingResult +from mmdet.structures.bbox import bbox2roi from torch import Tensor from mmaction.utils import ConfigType, InstanceList, SampleList -try: - from mmdet.models.roi_heads import StandardRoIHead - from mmdet.models.task_modules.samplers import SamplingResult - from mmdet.registry import MODELS as MMDET_MODELS - from mmdet.structures.bbox import bbox2roi - mmdet_imported = True -except (ImportError, ModuleNotFoundError): - from mmaction.utils import SamplingResult - mmdet_imported = False - -if mmdet_imported: - - @MMDET_MODELS.register_module() - class AVARoIHead(StandardRoIHead): - - def loss(self, x: Union[Tensor, - Tuple[Tensor]], rpn_results_list: InstanceList, - data_samples: SampleList, **kwargs) -> dict: - """Perform forward propagation and loss calculation of the - detection roi on the features of the upstream network. - - Args: - x (Tensor or Tuple[Tensor]): The image features extracted by - the upstream network. - rpn_results_list (List[:obj:`InstanceData`]): List of region - proposals. - data_samples (List[:obj:`ActionDataSample`]): The batch - data samples. - - Returns: - Dict[str, Tensor]: A dictionary of loss components. - """ - assert len(rpn_results_list) == len(data_samples) - batch_gt_instances = [] - for data_sample in data_samples: - batch_gt_instances.append(data_sample.gt_instances) - - # assign gts and sample proposals - num_imgs = len(data_samples) - sampling_results = [] - for i in range(num_imgs): - # rename rpn_results.bboxes to rpn_results.priors - rpn_results = rpn_results_list[i] - rpn_results.priors = rpn_results.pop('bboxes') - - assign_result = self.bbox_assigner.assign( - rpn_results, batch_gt_instances[i], None) - sampling_result = self.bbox_sampler.sample( - assign_result, rpn_results, batch_gt_instances[i]) - sampling_results.append(sampling_result) - - # LFB needs meta_info: 'img_key' - batch_img_metas = [ - data_samples.metainfo for data_samples in data_samples - ] - - losses = dict() - # bbox head forward and loss - bbox_results = self.bbox_loss(x, sampling_results, batch_img_metas) - losses.update(bbox_results['loss_bbox']) - - return losses - - def _bbox_forward(self, x: Union[Tensor, Tuple[Tensor]], rois: Tensor, - batch_img_metas: List[dict], **kwargs) -> dict: - """Box head forward function used in both training and testing. - - Args: - x (Tensor or Tuple[Tensor]): The image features extracted by - the upstream network. - rois (Tensor): RoIs with the shape (n, 5) where the first - column indicates batch id of each RoI. - batch_img_metas (List[dict]): List of image information. - - Returns: - dict[str, Tensor]: Usually returns a dictionary with keys: - - - `cls_score` (Tensor): Classification scores. - - `bbox_pred` (Tensor): Box energies / deltas. - - `bbox_feats` (Tensor): Extract bbox RoI features. 
- """ - bbox_feats, global_feat = self.bbox_roi_extractor(x, rois) - - if self.with_shared_head: - bbox_feats = self.shared_head( - bbox_feats, - feat=global_feat, - rois=rois, - img_metas=batch_img_metas) - - cls_score = self.bbox_head(bbox_feats) - - bbox_results = dict(cls_score=cls_score, bbox_feats=bbox_feats) - return bbox_results - - def bbox_loss(self, x: Union[Tensor, Tuple[Tensor]], - sampling_results: List[SamplingResult], + +class AVARoIHead(StandardRoIHead): + + def loss(self, x: Union[Tensor, + Tuple[Tensor]], rpn_results_list: InstanceList, + data_samples: SampleList, **kwargs) -> dict: + """Perform forward propagation and loss calculation of the detection + roi on the features of the upstream network. + + Args: + x (Tensor or Tuple[Tensor]): The image features extracted by + the upstream network. + rpn_results_list (List[:obj:`InstanceData`]): List of region + proposals. + data_samples (List[:obj:`ActionDataSample`]): The batch + data samples. + + Returns: + Dict[str, Tensor]: A dictionary of loss components. + """ + assert len(rpn_results_list) == len(data_samples) + batch_gt_instances = [] + for data_sample in data_samples: + batch_gt_instances.append(data_sample.gt_instances) + + # assign gts and sample proposals + num_imgs = len(data_samples) + sampling_results = [] + for i in range(num_imgs): + # rename rpn_results.bboxes to rpn_results.priors + rpn_results = rpn_results_list[i] + rpn_results.priors = rpn_results.pop('bboxes') + + assign_result = self.bbox_assigner.assign(rpn_results, + batch_gt_instances[i], + None) + sampling_result = self.bbox_sampler.sample(assign_result, + rpn_results, + batch_gt_instances[i]) + sampling_results.append(sampling_result) + + # LFB needs meta_info: 'img_key' + batch_img_metas = [ + data_samples.metainfo for data_samples in data_samples + ] + + losses = dict() + # bbox head forward and loss + bbox_results = self.bbox_loss(x, sampling_results, batch_img_metas) + losses.update(bbox_results['loss_bbox']) + + return losses + + def _bbox_forward(self, x: Union[Tensor, Tuple[Tensor]], rois: Tensor, batch_img_metas: List[dict], **kwargs) -> dict: - """Perform forward propagation and loss calculation of the bbox - head on the features of the upstream network. + """Box head forward function used in both training and testing. - Args: - x (Tensor or Tuple[Tensor]): The image features extracted by - the upstream network. - sampling_results (List[SamplingResult]): Sampling results. - batch_img_metas (List[dict]): List of image information. + Args: + x (Tensor or Tuple[Tensor]): The image features extracted by + the upstream network. + rois (Tensor): RoIs with the shape (n, 5) where the first + column indicates batch id of each RoI. + batch_img_metas (List[dict]): List of image information. - Returns: + Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - - `cls_score` (Tensor): Classification scores. - - `bbox_pred` (Tensor): Box energies / deltas. - - `bbox_feats` (Tensor): Extract bbox RoI features. - - `loss_bbox` (dict): A dictionary of bbox loss components. - """ - rois = bbox2roi([res.priors for res in sampling_results]) - bbox_results = self._bbox_forward(x, rois, batch_img_metas) + - `cls_score` (Tensor): Classification scores. + - `bbox_pred` (Tensor): Box energies / deltas. + - `bbox_feats` (Tensor): Extract bbox RoI features. 
+ """ + bbox_feats, global_feat = self.bbox_roi_extractor(x, rois) - bbox_loss_and_target = self.bbox_head.loss_and_target( - cls_score=bbox_results['cls_score'], - rois=rois, - sampling_results=sampling_results, - rcnn_train_cfg=self.train_cfg) - - bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox']) - return bbox_results - - def predict(self, x: Union[Tensor, Tuple[Tensor]], - rpn_results_list: InstanceList, data_samples: SampleList, - **kwargs) -> InstanceList: - """Perform forward propagation of the roi head and predict - detection results on the features of the upstream network. - - Args: - x (Tensor or Tuple[Tensor]): The image features extracted by - the upstream network. - rpn_results_list (List[:obj:`InstanceData`]): list of region - proposals. - data_samples (List[:obj:`ActionDataSample`]): The batch - data samples. - - Returns: - List[obj:`InstanceData`]: Detection results of each image. - Each item usually contains following keys. - - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). - """ - assert self.with_bbox, 'Bbox head must be implemented.' - batch_img_metas = [ - data_samples.metainfo for data_samples in data_samples - ] - if isinstance(x, tuple): - x_shape = x[0].shape - else: - x_shape = x.shape - - assert x_shape[0] == 1, 'only accept 1 sample at test mode' - assert x_shape[0] == len(batch_img_metas) == len(rpn_results_list) - - results_list = self.predict_bbox( - x, - batch_img_metas, - rpn_results_list, - rcnn_test_cfg=self.test_cfg) - - return results_list - - def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], - rpn_results_list: InstanceList, - rcnn_test_cfg: ConfigType) -> InstanceList: - """Perform forward propagation of the bbox head and predict - detection results on the features of the upstream network. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - batch_img_metas (list[dict]): List of image information. - rpn_results_list (list[:obj:`InstanceData`]): List of region - proposals. - rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. - - Returns: - list[:obj:`InstanceData`]: Detection results of each image - after the post process. Each item usually contains following - keys: - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). - """ - proposals = [res.bboxes for res in rpn_results_list] - rois = bbox2roi(proposals) - bbox_results = self._bbox_forward(x, rois, batch_img_metas) - - # split batch bbox prediction back to each image - cls_scores = bbox_results['cls_score'] - num_proposals_per_img = tuple(len(p) for p in proposals) - rois = rois.split(num_proposals_per_img, 0) - cls_scores = cls_scores.split(num_proposals_per_img, 0) - - result_list = self.bbox_head.predict_by_feat( + if self.with_shared_head: + bbox_feats = self.shared_head( + bbox_feats, + feat=global_feat, rois=rois, - cls_scores=cls_scores, - batch_img_metas=batch_img_metas, - rcnn_test_cfg=rcnn_test_cfg) - - return result_list -else: - # Just define an empty class, so that __init__ can import it. - class AVARoIHead: - - def __init__(self, *args, **kwargs): - raise ImportError( - 'Failed to import `bbox2roi` from `mmdet.core.bbox`, ' - 'or failed to import `MODELS` from `mmdet.registry`, ' - 'or failed to import `StandardRoIHead` from ' - '`mmdet.models.roi_heads`. You will be unable to use ' - '`AVARoIHead`. 
') + img_metas=batch_img_metas) + + cls_score = self.bbox_head(bbox_feats) + + bbox_results = dict(cls_score=cls_score, bbox_feats=bbox_feats) + return bbox_results + + def bbox_loss(self, x: Union[Tensor, Tuple[Tensor]], + sampling_results: List[SamplingResult], + batch_img_metas: List[dict], **kwargs) -> dict: + """Perform forward propagation and loss calculation of the bbox head on + the features of the upstream network. + + Args: + x (Tensor or Tuple[Tensor]): The image features extracted by + the upstream network. + sampling_results (List[SamplingResult]): Sampling results. + batch_img_metas (List[dict]): List of image information. + + Returns: + dict[str, Tensor]: Usually returns a dictionary with keys: + + - `cls_score` (Tensor): Classification scores. + - `bbox_pred` (Tensor): Box energies / deltas. + - `bbox_feats` (Tensor): Extract bbox RoI features. + - `loss_bbox` (dict): A dictionary of bbox loss components. + """ + rois = bbox2roi([res.priors for res in sampling_results]) + bbox_results = self._bbox_forward(x, rois, batch_img_metas) + + bbox_loss_and_target = self.bbox_head.loss_and_target( + cls_score=bbox_results['cls_score'], + rois=rois, + sampling_results=sampling_results, + rcnn_train_cfg=self.train_cfg) + + bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox']) + return bbox_results + + def predict(self, x: Union[Tensor, + Tuple[Tensor]], rpn_results_list: InstanceList, + data_samples: SampleList, **kwargs) -> InstanceList: + """Perform forward propagation of the roi head and predict detection + results on the features of the upstream network. + + Args: + x (Tensor or Tuple[Tensor]): The image features extracted by + the upstream network. + rpn_results_list (List[:obj:`InstanceData`]): list of region + proposals. + data_samples (List[:obj:`ActionDataSample`]): The batch + data samples. + + Returns: + List[obj:`InstanceData`]: Detection results of each image. + Each item usually contains following keys. + + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). + """ + assert self.with_bbox, 'Bbox head must be implemented.' + batch_img_metas = [ + data_samples.metainfo for data_samples in data_samples + ] + if isinstance(x, tuple): + x_shape = x[0].shape + else: + x_shape = x.shape + + assert x_shape[0] == 1, 'only accept 1 sample at test mode' + assert x_shape[0] == len(batch_img_metas) == len(rpn_results_list) + + results_list = self.predict_bbox( + x, batch_img_metas, rpn_results_list, rcnn_test_cfg=self.test_cfg) + + return results_list + + def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], + rpn_results_list: InstanceList, + rcnn_test_cfg: ConfigType) -> InstanceList: + """Perform forward propagation of the bbox head and predict detection + results on the features of the upstream network. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + batch_img_metas (list[dict]): List of image information. + rpn_results_list (list[:obj:`InstanceData`]): List of region + proposals. + rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. + + Returns: + list[:obj:`InstanceData`]: Detection results of each image + after the post process. Each item usually contains following + keys: + - scores (Tensor): Classification scores, has a shape + (num_instance, ) + - labels (Tensor): Labels of bboxes, has a shape + (num_instances, ). 
+ """ + proposals = [res.bboxes for res in rpn_results_list] + rois = bbox2roi(proposals) + bbox_results = self._bbox_forward(x, rois, batch_img_metas) + + # split batch bbox prediction back to each image + cls_scores = bbox_results['cls_score'] + num_proposals_per_img = tuple(len(p) for p in proposals) + rois = rois.split(num_proposals_per_img, 0) + cls_scores = cls_scores.split(num_proposals_per_img, 0) + + result_list = self.bbox_head.predict_by_feat( + rois=rois, + cls_scores=cls_scores, + batch_img_metas=batch_img_metas, + rcnn_test_cfg=rcnn_test_cfg) + + return result_list diff --git a/mmaction/models/roi_heads/shared_heads/acrn_head.py b/mmaction/models/roi_heads/shared_heads/acrn_head.py index 3271c74bbe..5c37e2c1ec 100644 --- a/mmaction/models/roi_heads/shared_heads/acrn_head.py +++ b/mmaction/models/roi_heads/shared_heads/acrn_head.py @@ -5,18 +5,9 @@ from mmengine.model.weight_init import constant_init, kaiming_init from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmaction.registry import MODELS - -try: - from mmdet.registry import MODELS as MMDET_MODELS - mmdet_imported = True -except (ImportError, ModuleNotFoundError): - mmdet_imported = False - # Note: All these heads take 5D Tensors as input (N, C, T, H, W) -@MODELS.register_module() class ACRNHead(nn.Module): """ACRN Head: Tile + 1x1 convolution + 3x3 convolution. @@ -132,7 +123,3 @@ def forward(self, x, feat, rois, **kwargs): new_feat = conv(new_feat) return new_feat - - -if mmdet_imported: - MMDET_MODELS.register_module()(ACRNHead) diff --git a/mmaction/models/roi_heads/shared_heads/fbo_head.py b/mmaction/models/roi_heads/shared_heads/fbo_head.py index aeb9c28514..8f4cba20ac 100644 --- a/mmaction/models/roi_heads/shared_heads/fbo_head.py +++ b/mmaction/models/roi_heads/shared_heads/fbo_head.py @@ -10,15 +10,8 @@ from mmengine.runner import load_checkpoint from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from mmaction.registry import MODELS from .lfb import LFB -try: - from mmdet.registry import MODELS as MMDET_MODELS - mmdet_imported = True -except (ImportError, ModuleNotFoundError): - mmdet_imported = False - class NonLocalLayer(nn.Module): """Non-local layer used in `FBONonLocal` is a variation of the vanilla non- @@ -322,7 +315,6 @@ def forward(self, st_feat, lt_feat): return out -@MODELS.register_module() class FBOHead(nn.Module): """Feature Bank Operator Head. @@ -403,7 +395,3 @@ def forward(self, x, rois, img_metas, **kwargs): out = torch.cat([identity, fbo_feat], dim=1) return out - - -if mmdet_imported: - MMDET_MODELS.register_module()(FBOHead) diff --git a/mmaction/models/roi_heads/shared_heads/lfb_infer_head.py b/mmaction/models/roi_heads/shared_heads/lfb_infer_head.py index fdf71092a1..d19fc36203 100644 --- a/mmaction/models/roi_heads/shared_heads/lfb_infer_head.py +++ b/mmaction/models/roi_heads/shared_heads/lfb_infer_head.py @@ -6,18 +6,9 @@ import torch.distributed as dist import torch.nn as nn -from mmaction.registry import MODELS - -try: - from mmdet.registry import MODELS as MMDET_MODELS - mmdet_imported = True -except (ImportError, ModuleNotFoundError): - mmdet_imported = False - # Note: All these heads take 5D Tensors as input (N, C, T, H, W) -@MODELS.register_module() class LFBInferHead(nn.Module): """Long-Term Feature Bank Infer Head. 
@@ -155,7 +146,3 @@ def __del__(self): osp.join(self.lfb_prefix_path, f'lfb_{self.dataset_mode}.pkl')) torch.save(lfb, lfb_file_path) print(f'LFB has been constructed in {lfb_file_path}!') - - -if mmdet_imported: - MMDET_MODELS.register_module()(LFBInferHead) diff --git a/mmaction/models/task_modules/__init__.py b/mmaction/models/task_modules/__init__.py index 9a6d4e76de..7fc1d7769e 100644 --- a/mmaction/models/task_modules/__init__.py +++ b/mmaction/models/task_modules/__init__.py @@ -1,4 +1,12 @@ # Copyright (c) OpenMMLab. All rights reserved. -from .assigners import MaxIoUAssignerAVA +try: + from mmdet.registry import TASK_UTILS as MMDET_TASK_UTILS -__all__ = ['MaxIoUAssignerAVA'] + from .assigners import MaxIoUAssignerAVA + + MMDET_TASK_UTILS.register_module()(MaxIoUAssignerAVA) + + __all__ = ['MaxIoUAssignerAVA'] + +except (ImportError, ModuleNotFoundError): + pass diff --git a/mmaction/models/task_modules/assigners/max_iou_assigner_ava.py b/mmaction/models/task_modules/assigners/max_iou_assigner_ava.py index 89fb6044ac..604065829f 100644 --- a/mmaction/models/task_modules/assigners/max_iou_assigner_ava.py +++ b/mmaction/models/task_modules/assigners/max_iou_assigner_ava.py @@ -1,138 +1,119 @@ # Copyright (c) OpenMMLab. All rights reserved. import torch +from mmdet.models.task_modules import AssignResult, MaxIoUAssigner from torch import Tensor -try: - from mmdet.models.task_modules import AssignResult, MaxIoUAssigner - from mmdet.registry import TASK_UTILS as MMDET_TASK_UTILS - mmdet_imported = True -except (ImportError, ModuleNotFoundError): - mmdet_imported = False -if mmdet_imported: +class MaxIoUAssignerAVA(MaxIoUAssigner): + """Assign a corresponding gt bbox or background to each bbox. - @MMDET_TASK_UTILS.register_module() - class MaxIoUAssignerAVA(MaxIoUAssigner): - """Assign a corresponding gt bbox or background to each bbox. + Each proposals will be assigned with `-1`, `0`, or a positive integer + indicating the ground truth index. - Each proposals will be assigned with `-1`, `0`, or a positive integer - indicating the ground truth index. + - -1: don't care + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt - - -1: don't care - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt + Args: + pos_iou_thr (float): IoU threshold for positive bboxes. + neg_iou_thr (float | tuple): IoU threshold for negative bboxes. + min_pos_iou (float): Minimum iou for a bbox to be considered as a + positive bbox. Positive samples can have smaller IoU than + pos_iou_thr due to the 4th step (assign max IoU sample to each + gt). Defaults to 0. + gt_max_assign_all (bool): Whether to assign all bboxes with the + same highest overlap with some gt to that gt. Defaults to True. + """ - Args: - pos_iou_thr (float): IoU threshold for positive bboxes. - neg_iou_thr (float | tuple): IoU threshold for negative bboxes. - min_pos_iou (float): Minimum iou for a bbox to be considered as a - positive bbox. Positive samples can have smaller IoU than - pos_iou_thr due to the 4th step (assign max IoU sample to each - gt). Defaults to 0. - gt_max_assign_all (bool): Whether to assign all bboxes with the - same highest overlap with some gt to that gt. Defaults to True. - """ + # The function is overridden, to handle the case that gt_label is not + # int + def assign_wrt_overlaps(self, overlaps: Tensor, + gt_labels: Tensor) -> AssignResult: + """Assign w.r.t. the overlaps of bboxes with gts. 
- # The function is overridden, to handle the case that gt_label is not - # int - def assign_wrt_overlaps(self, overlaps: Tensor, - gt_labels: Tensor) -> AssignResult: - """Assign w.r.t. the overlaps of bboxes with gts. - - Args: - overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, - shape(k, n). - gt_labels (Tensor): Labels of k gt_bboxes, shape - (k, num_classes). - - Returns: - :obj:`AssignResult`: The assign result. - """ - num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) - - # 1. assign -1 by default - assigned_gt_inds = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = overlaps.new_zeros((num_bboxes, )) - assigned_labels = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - if num_gts == 0: - # No truth, assign everything to background - assigned_gt_inds[:] = 0 - return AssignResult( - num_gts=num_gts, - gt_inds=assigned_gt_inds, - max_overlaps=max_overlaps, - labels=assigned_labels) - - # for each anchor, which gt best overlaps with it - # for each anchor, the max iou of all gts - max_overlaps, argmax_overlaps = overlaps.max(dim=0) - # for each gt, which anchor best overlaps with it - # for each gt, the max iou of all proposals - gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) - - # 2. assign negative: below - # the negative inds are set to be 0 - if isinstance(self.neg_iou_thr, float): - assigned_gt_inds[(max_overlaps >= 0) - & (max_overlaps < self.neg_iou_thr)] = 0 - elif isinstance(self.neg_iou_thr, tuple): - assert len(self.neg_iou_thr) == 2 - assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) - & (max_overlaps < self.neg_iou_thr[1])] = 0 - - # 3. assign positive: above positive IoU threshold - pos_inds = max_overlaps >= self.pos_iou_thr - assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 - - if self.match_low_quality: - # Low-quality matching will overwrite the assigned_gt_inds - # assigned in Step 3. Thus, the assigned gt might not be the - # best one for prediction. - # For example, if bbox A has 0.9 and 0.8 iou with GT bbox - # 1 & 2, bbox 1 will be assigned as the best target for bbox A - # in step 3. However, if GT bbox 2's gt_argmax_overlaps = A, - # bbox A's assigned_gt_inds will be overwritten to be bbox B. - # This might be the reason that it is not used in ROI Heads. - for i in range(num_gts): - if gt_max_overlaps[i] >= self.min_pos_iou: - if self.gt_max_assign_all: - max_iou_inds = overlaps[i, :] == gt_max_overlaps[i] - assigned_gt_inds[max_iou_inds] = i + 1 - else: - assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 - - # consider multi-class case (AVA) - assert len(gt_labels[0]) > 1 - assigned_labels = assigned_gt_inds.new_zeros( - (num_bboxes, len(gt_labels[0])), dtype=torch.float32) - - # If not assigned, labels will be all 0 - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] + Args: + overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, + shape(k, n). + gt_labels (Tensor): Labels of k gt_bboxes, shape + (k, num_classes). + Returns: + :obj:`AssignResult`: The assign result. + """ + num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) + + # 1. 
assign -1 by default + assigned_gt_inds = overlaps.new_full((num_bboxes, ), + -1, + dtype=torch.long) + + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + max_overlaps = overlaps.new_zeros((num_bboxes, )) + assigned_labels = overlaps.new_full((num_bboxes, ), + -1, + dtype=torch.long) + if num_gts == 0: + # No truth, assign everything to background + assigned_gt_inds[:] = 0 return AssignResult( num_gts=num_gts, gt_inds=assigned_gt_inds, max_overlaps=max_overlaps, labels=assigned_labels) -else: - # define an empty class, so that can be imported - class MaxIoUAssignerAVA: - - def __init__(self, *args, **kwargs): - raise ImportError( - 'Failed to import `AssignResult`, `MaxIoUAssigner` from ' - '`mmdet.core.bbox` or failed to import `TASK_UTILS` from ' - '`mmdet.registry`. The class `MaxIoUAssignerAVA` is ' - 'invalid. ') + # for each anchor, which gt best overlaps with it + # for each anchor, the max iou of all gts + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + # for each gt, which anchor best overlaps with it + # for each gt, the max iou of all proposals + gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) + + # 2. assign negative: below + # the negative inds are set to be 0 + if isinstance(self.neg_iou_thr, float): + assigned_gt_inds[(max_overlaps >= 0) + & (max_overlaps < self.neg_iou_thr)] = 0 + elif isinstance(self.neg_iou_thr, tuple): + assert len(self.neg_iou_thr) == 2 + assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) + & (max_overlaps < self.neg_iou_thr[1])] = 0 + + # 3. assign positive: above positive IoU threshold + pos_inds = max_overlaps >= self.pos_iou_thr + assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 + + if self.match_low_quality: + # Low-quality matching will overwrite the assigned_gt_inds + # assigned in Step 3. Thus, the assigned gt might not be the + # best one for prediction. + # For example, if bbox A has 0.9 and 0.8 iou with GT bbox + # 1 & 2, bbox 1 will be assigned as the best target for bbox A + # in step 3. However, if GT bbox 2's gt_argmax_overlaps = A, + # bbox A's assigned_gt_inds will be overwritten to be bbox B. + # This might be the reason that it is not used in ROI Heads. 
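A small worked example may help make the assignment scheme documented above concrete: `-1` means ignore, `0` means background, and `i + 1` means matched to the i-th ground-truth box, with AVA-style multi-hot label rows copied over for positive proposals. The snippet is an illustration only, not part of the patch (the hunk resumes below with the low-quality-match loop); it assumes mmdet is installed, since after this change `MaxIoUAssignerAVA` unconditionally subclasses mmdet's `MaxIoUAssigner`, and it uses the import path implied by the `task_modules/__init__.py` hunk above.

```python
import torch

from mmaction.models.task_modules.assigners import MaxIoUAssignerAVA

# overlaps has shape (num_gts, num_bboxes): 2 ground-truth boxes, 3 proposals.
overlaps = torch.tensor([[0.9, 0.2, 0.0],
                         [0.1, 0.6, 0.0]])
# AVA-style multi-hot labels, one row per ground-truth box (4 action classes).
gt_labels = torch.tensor([[1., 0., 1., 0.],
                          [0., 1., 0., 0.]])

assigner = MaxIoUAssignerAVA(pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.0)
result = assigner.assign_wrt_overlaps(overlaps, gt_labels)

print(result.gt_inds)  # tensor([1, 2, 0]): 1-based gt index per proposal, 0 = background
print(result.labels)   # rows 0 and 1 copy the matched gt's multi-hot labels; row 2 stays all zeros
```

Proposal 0 clears `pos_iou_thr` against the first ground-truth box and proposal 1 against the second, while proposal 2 falls below `neg_iou_thr` and is assigned to background, so its label row remains all zeros, which is exactly the behaviour implemented by the multi-class branch at the end of this method.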
+ for i in range(num_gts): + if gt_max_overlaps[i] >= self.min_pos_iou: + if self.gt_max_assign_all: + max_iou_inds = overlaps[i, :] == gt_max_overlaps[i] + assigned_gt_inds[max_iou_inds] = i + 1 + else: + assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 + + # consider multi-class case (AVA) + assert len(gt_labels[0]) > 1 + assigned_labels = assigned_gt_inds.new_zeros( + (num_bboxes, len(gt_labels[0])), dtype=torch.float32) + + # If not assigned, labels will be all 0 + pos_inds = torch.nonzero( + assigned_gt_inds > 0, as_tuple=False).squeeze() + if pos_inds.numel() > 0: + assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - + 1] + + return AssignResult( + num_gts=num_gts, + gt_inds=assigned_gt_inds, + max_overlaps=max_overlaps, + labels=assigned_labels) diff --git a/tests/models/roi_heads/test_bbox_heads.py b/tests/models/roi_heads/test_bbox_heads.py index 3b756051b1..8f04e8c8ff 100644 --- a/tests/models/roi_heads/test_bbox_heads.py +++ b/tests/models/roi_heads/test_bbox_heads.py @@ -10,13 +10,14 @@ def test_bbox_head_ava(): bbox head.""" with pytest.raises(TypeError): # topk must be None, int or tuple[int] - BBoxHeadAVA(topk=0.1) + BBoxHeadAVA(background_class=True, topk=0.1) with pytest.raises(AssertionError): # topk should be smaller than num_classes - BBoxHeadAVA(num_classes=5, topk=(3, 5)) + BBoxHeadAVA(background_class=True, num_classes=5, topk=(3, 5)) - bbox_head = BBoxHeadAVA(in_channels=10, num_classes=4, topk=1) + bbox_head = BBoxHeadAVA( + background_class=True, in_channels=10, num_classes=4, topk=1) input = torch.randn([3, 10, 2, 2, 2]) ret = bbox_head(input) assert ret.shape == (3, 4) @@ -48,10 +49,16 @@ def test_bbox_head_ava(): torch.ones([4, 6], dtype=bool)) # Test Multi-Label Loss - bbox_head = BBoxHeadAVA() # Why is this here? isn't this redundant? + bbox_head = BBoxHeadAVA( + background_class=True) # Why is this here? isn't this redundant? bbox_head.init_weights() - bbox_head = BBoxHeadAVA(temporal_pool_type='max', spatial_pool_type='avg') + bbox_head = BBoxHeadAVA( + background_class=True, + temporal_pool_type='max', + spatial_pool_type='avg') bbox_head.init_weights() + + # test without background class """ losses = bbox_head.loss( cls_score=cls_score, diff --git a/tests/models/similarity/test_clip_similarity.py b/tests/models/similarity/test_clip_similarity.py index 9afa158243..6838cf812e 100644 --- a/tests/models/similarity/test_clip_similarity.py +++ b/tests/models/similarity/test_clip_similarity.py @@ -1,6 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. 
+import platform from unittest.mock import MagicMock +import pytest import torch from mmaction.registry import MODELS @@ -9,6 +11,7 @@ from mmaction.utils import register_all_modules +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') def test_clip_similarity(): register_all_modules() cfg = get_similarity_cfg( From de82f142df90dbdb7d8eeffdaa1ade8c3b66f26b Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Wed, 6 Sep 2023 18:00:38 +0800 Subject: [PATCH 14/24] [Enhancement] Support flexible runner (#2646) --- tools/test.py | 10 +++++++++- tools/train.py | 10 +++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/tools/test.py b/tools/test.py index 4f310fa9e0..e1e62e16f7 100644 --- a/tools/test.py +++ b/tools/test.py @@ -6,6 +6,8 @@ from mmengine.config import Config, DictAction from mmengine.runner import Runner +from mmaction.registry import RUNNERS + def parse_args(): parser = argparse.ArgumentParser( @@ -108,7 +110,13 @@ def main(): cfg.load_from = args.checkpoint # build the runner from config - runner = Runner.from_cfg(cfg) + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) # start testing runner.test() diff --git a/tools/train.py b/tools/train.py index 74980a99e0..c7f4892332 100644 --- a/tools/train.py +++ b/tools/train.py @@ -6,6 +6,8 @@ from mmengine.config import Config, DictAction from mmengine.runner import Runner +from mmaction.registry import RUNNERS + def parse_args(): parser = argparse.ArgumentParser(description='Train a action recognizer') @@ -125,7 +127,13 @@ def main(): cfg = merge_args(cfg, args) # build the runner from config - runner = Runner.from_cfg(cfg) + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) # start training runner.train() From d18ca93d58ea489ccf159cf7ffdc5d6d55aec3f5 Mon Sep 17 00:00:00 2001 From: wxDai Date: Wed, 6 Sep 2023 21:22:28 +0800 Subject: [PATCH 15/24] [Fix] Refine ActionDataSample structure (#2658) --- demo/demo.ipynb | 2 +- demo/demo.py | 2 +- demo/demo_audio.py | 2 +- demo/demo_skeleton.py | 2 +- demo/demo_video_structuralize.py | 6 +- demo/fuse/bone.pkl | Bin 82992 -> 81562 bytes demo/fuse/joint.pkl | Bin 82815 -> 81385 bytes demo/long_video_demo.py | 2 +- demo/mmaction2_tutorial.ipynb | 3792 ++++++++--------- demo/webcam_demo.py | 2 +- docs/en/get_started/guide_to_framework.md | 17 +- docs/en/get_started/installation.md | 2 +- docs/zh_cn/get_started/guide_to_framework.md | 16 +- docs/zh_cn/get_started/installation.md | 2 +- mmaction/apis/inference.py | 4 +- .../inferencers/actionrecog_inferencer.py | 4 +- mmaction/datasets/transforms/formatting.py | 21 +- mmaction/evaluation/metrics/acc_metric.py | 94 +- .../data_preprocessors/data_preprocessor.py | 2 +- mmaction/models/heads/base.py | 11 +- mmaction/models/heads/omni_head.py | 5 +- mmaction/models/heads/rgbpose_head.py | 33 +- mmaction/models/necks/tpn.py | 2 +- mmaction/models/recognizers/base.py | 4 +- mmaction/models/utils/blending_utils.py | 8 +- mmaction/structures/action_data_sample.py | 108 +- mmaction/utils/gradcam_utils.py | 4 +- mmaction/visualization/action_visualizer.py | 6 +- projects/actionclip/README.md | 2 +- tests/apis/test_inference.py | 2 +- 
tests/datasets/transforms/test_formating.py | 6 +- tests/evaluation/metrics/test_acc_metric.py | 7 +- .../test_data_preprocessor.py | 6 +- .../test_multimodal_data_preprocessor.py | 2 +- tests/models/heads/test_feature_head.py | 4 +- tests/models/heads/test_omni_head.py | 6 +- tests/models/necks/test_tpn.py | 3 +- tests/models/recognizers/recognizer_omni.py | 15 +- tests/models/recognizers/test_recognizer2d.py | 6 +- tests/models/recognizers/test_recognizer3d.py | 6 +- .../models/recognizers/test_recognizer_gcn.py | 6 +- tests/models/utils/test_blending_utils.py | 3 +- tests/models/utils/test_gradcam.py | 4 +- tests/visualization/test_action_visualizer.py | 4 +- tests/visualization/test_video_backend.py | 32 +- tools/analysis_tools/report_accuracy.py | 13 +- tools/deployment/export_onnx_gcn.py | 2 +- tools/deployment/export_onnx_posec3d.py | 2 +- 48 files changed, 2083 insertions(+), 2201 deletions(-) diff --git a/demo/demo.ipynb b/demo/demo.ipynb index ebcf2ff538..9d5e958864 100644 --- a/demo/demo.ipynb +++ b/demo/demo.ipynb @@ -70,7 +70,7 @@ "label = '../tools/data/kinetics/label_map_k400.txt'\n", "results = inference_recognizer(model, video)\n", "\n", - "pred_scores = results.pred_scores.item.tolist()\n", + "pred_scores = results.pred_score.tolist()\n", "score_tuples = tuple(zip(range(len(pred_scores)), pred_scores))\n", "score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True)\n", "top5_label = score_sorted[:5]\n", diff --git a/demo/demo.py b/demo/demo.py index 6c9b5db5a5..d2ec044a04 100644 --- a/demo/demo.py +++ b/demo/demo.py @@ -119,7 +119,7 @@ def main(): model = init_recognizer(cfg, args.checkpoint, device=args.device) pred_result = inference_recognizer(model, args.video) - pred_scores = pred_result.pred_scores.item.tolist() + pred_scores = pred_result.pred_score.tolist() score_tuples = tuple(zip(range(len(pred_scores)), pred_scores)) score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) top5_label = score_sorted[:5] diff --git a/demo/demo_audio.py b/demo/demo_audio.py index 2da446a2da..c874813f1f 100644 --- a/demo/demo_audio.py +++ b/demo/demo_audio.py @@ -39,7 +39,7 @@ def main(): raise NotImplementedError('Demo works on extracted audio features') pred_result = inference_recognizer(model, args.audio) - pred_scores = pred_result.pred_scores.item.tolist() + pred_scores = pred_result.pred_score.tolist() score_tuples = tuple(zip(range(len(pred_scores)), pred_scores)) score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) top5_label = score_sorted[:5] diff --git a/demo/demo_skeleton.py b/demo/demo_skeleton.py index 7a162ef468..19245b6540 100644 --- a/demo/demo_skeleton.py +++ b/demo/demo_skeleton.py @@ -152,7 +152,7 @@ def main(): model = init_recognizer(config, args.checkpoint, args.device) result = inference_skeleton(model, pose_results, (h, w)) - max_pred_index = result.pred_scores.item.argmax().item() + max_pred_index = result.pred_score.argmax().item() label_map = [x.strip() for x in open(args.label_map).readlines()] action_label = label_map[max_pred_index] diff --git a/demo/demo_video_structuralize.py b/demo/demo_video_structuralize.py index 805dda7e14..85784efbf5 100644 --- a/demo/demo_video_structuralize.py +++ b/demo/demo_video_structuralize.py @@ -373,7 +373,7 @@ def skeleton_based_action_recognition(args, pose_results, h, w): skeleton_model = init_recognizer( skeleton_config, args.skeleton_checkpoint, device=args.device) result = inference_skeleton(skeleton_model, pose_results, (h, w)) - action_idx = 
result.pred_scores.item.argmax().item() + action_idx = result.pred_score.argmax().item() return label_map[action_idx] @@ -382,7 +382,7 @@ def rgb_based_action_recognition(args): rgb_config.model.backbone.pretrained = None rgb_model = init_recognizer(rgb_config, args.rgb_checkpoint, args.device) action_results = inference_recognizer(rgb_model, args.video) - rgb_action_result = action_results.pred_scores.item.argmax().item() + rgb_action_result = action_results.pred_score.argmax().item() label_map = [x.strip() for x in open(args.label_map).readlines()] return label_map[rgb_action_result] @@ -460,7 +460,7 @@ def skeleton_based_stdet(args, label_map, human_detections, pose_results, output = inference_recognizer(skeleton_stdet_model, fake_anno) # for multi-label recognition - score = output.pred_scores.item.tolist() + score = output.pred_score.tolist() for k in range(len(score)): # 81 if k not in label_map: continue diff --git a/demo/fuse/bone.pkl b/demo/fuse/bone.pkl index a5cc72b3a1ba835d3b1d7af712c6a3973b862456..21d311924c9b9cfab71145d61e8e618fc6e0cbea 100644 GIT binary patch delta 14870 zcmaKzcf1ru+J%{spqK+FAWBkE5oYep^vsyUu3?STsuv?7qNo_^rOmKnXvGYSa8*p0 za~kzx#IWYP?wWJXIjruwe$QKFcTbD=$YDwcG9@LCrq;WnZt+V7MZ#Akmb9NA3Oe#eI|^b+ z*4?(wMRo~=B^!6-1`J(lO=Fi1?J~wLJEk`~9V!#{89RCWAu_=(H*t~S6T46B(Xm4% z_sHVEby_J_tBslW431rCJ81rrzbGFpKas9hFt$%*`x?6LCQb`|YwRbvN5cCfKS{IG^HR5rE}*}&MUA4E-is{5YS4effy zuAdI#^vodIN=yg_(L3(9t<)?nF*_e@pu!s(yHR8}Hg*#~giTd=GcWw7(Ei!j%^AWL z*&!rHBB)e5Y(AGBXEp?l(S7=qwp8)0jQvYww>EYg?|NGm-_DC~AKD#^9i6&vWLz_q zv(f8@X1MHm`M{|3Uya={vO5{Ov-jPu(qp{zE}`Ak*xm5Gdl$YdK{4x_4MAgO|30NX zRD4fk$3}K9W5;>ld#m_(FTPJ`Cm6eL>iZ1!orD_{8Z+)0k_J?$H}B|6*!!vOL}T}l z>;c9e=uJ;j-GjXD!J$3G*h4XGyD(iXl^dgTgH3!KX_s54eUEs?i1szv*f6q(8GE=N z!x7rok-o2^LVL8a$0TFu_V>jSm}C%aXpSB@Cm&2v;i<;{EwaZNJIxQ` zI2AtL3!f0$6OD}+#7SKYqEc?tL8P6P`7sY#txxG>)t+wbDUm(Z*weh}(^dNnuYG1{ z&oXvKVtUNnOz^cr$Njx?Y4Wu~vAOQxOY^}@6`o~mBeG{3JKLL{qrx3t_?*z5YwUTL zp4)}#Djx6aIXDSK+sCcdw{*TrUtsKok-f;+i@oQ0Dt(EUzBII#8GCu^`9}4e>QqXZ zvt+eW3Yw>^etUkAD<<}8yfw6cKRqE{AKDv?y%B#mW&Nd-uGkp8(U2t5pjhX(vv!}- z&Fb$KW1Eq^)!5s-zuVPcr}sBMw09VLXXB}ebCsyEBm0=Kk9*HgsPvOw z`l--9ZR|6t=kqd|R%(TYozgPFYK2;JgSL73;8|6E&e(rM_IYFf=}rGjm0$46FNXFd zV_(MfD_xil$~C?6q)C_QZScA)wAoix{54}=kL(-9zUghhrQ&aU@pnS|uCebWwj1}& zNq=%dspfuisW2OwYX=wQtM+|W{=nD|Bm0rDAA8%MsPd;?`Lob|ZtNG>{xWO3PWWP1`J3of+wWl9^Pd|qC-^TuwjA6|0&V4~6H@RgR z2GzFa?DZP??)GP<^V{Wtt1+D=$K^ScJZNMQ@UW@UoszYlOl@*0Y(C`J_?%ZOXRne= z{N`PHAc1}9>2!O6ku2)?VxSv}^j4OOWO1q+kSqZUWl7M;QeZl=xr=n`_Q16K1E=<$ z^x)kOG;TO%eoLB9=ZB_8svZ=Y-)?+KzI|yGz``<4ZdovrNBP)T#(T|diBl(q7xRx^klRRi=?i!Yi^XQL;ni=4PR|X>)==dt2&tp|(m>ER% zW(LDT83Gy^3Z{9So6X~yIrEbQD%IcJCFzFd<6B&uZy%-_xLD08t`0`BhU3FQAIX}^ zaIqHEyI30*$_UWNIv|ng-D?+-RLkyeFo}eACX%J%NJgQdW(rQY2u9N8cnS28l$Bwo zLiJ_>SSVG{NDWLQxg;A2QzVI`TJ-l^nGMayw!S*wURMoVtm_ol10z}A@eM#9$%e{s zu@Tj~*ccYdCZLf`K_c0V(p4m!|66BCwVs_NOUIG?2@N×Tl7|9lnZwdNHwo-s_4wA?iN;Z)s|5}kb%2QD?OLRl)NpP7sl3fr{H@iCJ-M~n8 zcYF`fN3y3f+>E7qH+#WC83!8K8%!gawNNCfsfu2wTOKcJY-p}H=G=Vyc-6qgK2C80 z7|Fhl?+5x!CMv_l{#5Vc09Ysof<`8RWO5Lti%iN@|D>2?q8nN>SvJn(U_{i-Ax`;F zFcRyyfIgGS%5W1>y_>^ep&SkxIRZ>GnXynN$uuc(1Km1JXcwAC@6nNOKT;(yag-B0 z8jR!^$EScklBvot@i(eBaV#v9X`qqgKq5Jwl8q$ky;gJA_aqV8nRCf5uBlEyCjWaW8%qis3Gi#;och7~o zq509iv-9njs0J=Bb&8jPkzDTh6`;@LN@cjXit1fl4GZNO(8#qQnOsN7W|B;h5~mT) zteN)qhxQ$PJ-cY%H#nUe!ANd${ASQ6aEmhfZc=^Ux57fX4K#8)m?m(Y-qS7`-}N*W zOdpnZS!io+KIztcd#CO}pYz@RcYu-H>G)lsAMf4D=<^<`@AF<*C<{O%_koP}eo9y4 zEf@63C;35?%7x6mW1n~mJb;FpdC&>}9gO55#~%iL0FNlc%%fCq<}p|(kAp^@0Mh`@ z&rE^FMlwISB`gK{h@DK+5-BxDO};eW{-i44;VGy0G#JS@;8 
zo1l@m!2d6hGy$&fnFsb#wcacoaaq3oZI!^pJ5KOjFp~Eie;@RTe4q>yA5y)Ek6@vE z3>x_aB#}=kT_jR2`gcHU#IuzRL2D!{#*uu6$i!Z=4jC|#?!Km>-F`#$-F^!T-rHXbo9oWimHL3d!JKf2xj4My^l<3EFbr2kPym;a^uE`NcA z@;}hXuOK7s#*4Q<9%)eKPP66MYddr7SBeLkLqpBvsXXL>k#u*w2k4J|Pi2_tMfGMD zg@v*hXrwoojDO5k*&hRUn(^6y7rwRD$6amLnunioW4?WHmB7RjPH;&ulBFD98uW=Q zqYM+vQoV`gV4*Az8d(7(kv^0z5+VN98_j@r=6cyLPNXjyYGy?zyb>5mKgaunK9K>+ zFtak%n;8fTWfjoKs$iPP<=I4jJvm9F!h7)6M6_LNemVWReET4kz{FrDI0TGjsN=&x zpU7&;FtIw-n^*%D%5c!gnjnd+Md>1ua$Ci{S&Yw^0UI*EhW>FRYa^m=MmXhlz(__q zJ__`a6qMnnNcC>oV4;*iBV{m+WZ{cue8vol<< z9gJj*ujZrWsti(A1AFnL%5Tt60lJr`yo%xa69A`zfk{i&LHA zX<#I$JAMY}BRNwUF3zHQ7c*d?%mj_h0*RzS=^~Os*{{4Lrvo-*&X23cnVgM?x|!{i z=YWxPIDQW3GdWioZqB26H*;a3oDUkg08BGEYoSb%Sy`@TPY1Mv=Hge)$+usq5}3Hi z30@3FGSBf#Kp)Aa$}n*m)tk5+7RnW%kt;zWxr&mFBsm>$F5>Nd5=g03&pc%fiUYYC z3H5S~)4djqFn(NN#t$6ZC=1SB8l@sNTe#uu$#-job|q$UT&7AZhQqV%hTG8I;-3 zdPg}pj^tiM)Xf5?d>fsZNHM- zcLv2;C1`GO>$UmzM|FQ3Jm%CM2P1jH@h3r_$5YC1@HEvscm@{Av!IdZK=SwpC7VYw zJu3ybW|qtl+L?vFA#ohfqoHR0N#$V%jN}E!Uj%&|FDb*!%T#aX6<8>*f<|5g(>NA> z2#Y;cYW3^_KkcA7dOmmJud4(m-f)6%f|0!C_}idQb{{i%Q{HP2M|E78mKfyxz z88q@AkUaiN$>tFsji6Za&tSpt{4&?&3}MzX5ogFrv# z!OG}$2-WvG6c)-b(8y{aV_uz-9dmNhEY)}xY$NzLB_luB|GMLh?P38f?Dpcw4J%XS|gtI>!YD&HgLil zf-J5$zA@-C*hCp-Hl=zqo54c)6KLemV3NU@gy z#)<5Uh`QO&DNh7hTXFmV&?j=BGTcm}dN&8bLOB>TatN3vavrVh5^_9ADs`5%TUR+~ z7n<|loR@DuR3$KBouGh`Om;j3eI$n|!^GiKZ{i48C`W=ujsl6~Xi7Gcxc8t`be~=% z7Yo{%`-!#UM2~Lr%}C&(_x{U0U9|IB$Bfz z*+`Np%I$8;X9j#~mi^3N?KqJcNT`>YPFGI@tgkqJHs}+Xtqd=7sNPElER=IVBjP+t6J3V{Wl@W~qz0PEj=?xxn!YL7#~J9@WLgRPSOQER;(? zBbS0Cav3F?NHR$Z)$AmpotY#f;z%w>L(N>_gs%iyUUB?t&_|*NUd>!f^=7Vvg>pS; zZI&mO(qM>H)a>93mtiL#ZFX#hVpbRtjQN5Y_VWB(# z8hH>*1JPODMIbd^r?g&dXxEy%d_N=K{&$tY#6wQ-VK9y8 zJP8_k3M7%IDSw@c3p7J{5j66W{#Ww( z?M0b)H}zWM{^`jVeL=m}*4*N!EA#CyJGqnO6{qkj7|CmnzYhAby`hY;y-D?BdkYrI z+n|wmK*shi<&Vb})U#ieXghPXN5x}%4-F0aeJA_@$Wn^qAA$a8f2<5MpHRJ-Php{a z1{(PsOe0wMXeTGBTITzzpr+qf&Ho<<`wLaT!Ir{4)hr;uM96M zP`#Hvuu%GfMpgvV4D>RV{^UD;T`V_0eXO$YrY|CbTDjKwW$)aqeEUkOfQNoguRj>c z0LNDbeI5gq;b9f3_pmA~ltG}8!611Ip=9$&PPjqvyVVUgWS%qI;zWibqHcyc<<&se zR2*Le^oa~thMP61-pyLDP}T;Gi~!R_^a|3Fh~7HH-$Vq}#vxONCKI(<>U?9VTr=Ok zjtXF5q>~#3MpAIR2>L+UlwqMn^%lyoP%5C203?tqB^yZGS+!8ncXx31zA9Ge0|Ule*<52yU`paZT{4Zf>RZtnHU)%v&-#7Fc9G}O$I zPWUL0g$~D$0euEjlwoEn)tmVnER+_1Tc~l9gjet$w|tvaWd80m<|i&6wt`2Aeo#-$!3!NN~^8cu4pV!Nf3=u-qzPyTo&Q}kw; zM|Y#+4kvOB7|FSgp9lI;&s9dp=Tm*h7r;Wf5HxZT$fz%-bT#VCr_*}7nz@6i#S>v3 z66)m=r+X>LT8877gFb{Sl;Pz{s`qjgER?H3BiDdw2>P^@o~rfhtt62eui-c$w6;6H zEaX^dxmG1Gah(&q9*pD$$8Q9EAU7$)#LZN1;ucsaP0+}#Ac5RQ$p#Wn!)meMUy&vA zqFCs7yPR8b``g(^JMVNt^FbCb9KRFv3EZWO&hMuB&hLSRaxZ9P0hlD<{uD0mm3v}V z0iR3%oJKcvZWnM8?0KK=)vM#6m2z%z^&y(t#luwJ#Urp#9tDj&1~R_KDcSKQX;%El zX-Utt3mv03%=Ov+33k!}KIxR60$Gi4{29;>@L6T_{v6f!{ts9v&x1z(2_^$<*uBz| zynaR6`cOu@bDMQ@e1Y;W-G^RZa0)Mik-X&i%b-7|uPCF}SE;_&*I=Q%4jOp_WXx|; zx)^iqcYlEvup#JpZZm$|Z?Ttl|F)BQ2V}v)@%KPK==YV;{RdRv{fDqnJ_3z=3?_pf z21Pml`LDbpViHKQzX0Z&67|1#(ihZ}uj$&wY07pPX6vdoC zN5w3dFhBK~Gv=J0&m7)&tLUETRsJ~lbpNQTQ@5(CPWRMa`bXO(XSHjbkvPz6(^&Qz z8jBsceUMJ&q4!kvt zx8^Qwc4-K_wG_HkZfxi`yz5Zob@RRM#_Q2i>`bX6Wy<7nEt96plooI8ve&cJw$!e% z)V|cAcu8f}(a7LN_Qe%j)? 
zXP30O^?YxA<89D#RN3p_Iu!%T-oRQa2CZtEUU!b#&1Eay`~ju9>%7-6Vbcb*>%TC1 zgH^|d#v2lN8yT#+fA9|+}n~31EOAY zBVGNg!@R_pq_);%yj0*d8!zp8ozd2^uC<)+<&9U+UXN(}Tc$TJdn0T8-Xcu-d7AR5 z`BW}bnJ{HQGz6(!wac2b6Qj1&wzo3g)`7Q;@wRnsZ>Mc<@7f;adpj8KUoEZX-Hv5% zr&8D!IgTn$rz&0jfhR{PPUT}MPPk0B@pjgRcQM|sfw!CSc6TY>LmM9L8s5|QjPdr; zPVfEqPVZCp_N{e#OxWqlYHiv)JR|XN<&wz{Mv{`rxl59D^DUXormO39zcewbs3P_= z-q^s~-*^YO-XEwU4ss&K`QCWrO=#WwiDhq6DQuM9cZ|AQ8J!+f>wG$&te;-;e);#eo*INv+ocqg>({E20+TnZblUsO`* ztX)*1{_BnU{_8rW|Gbm5>648&Bk)c!-l?wtr)krtyQTx*JHvQqYX8q_{hXg&_RgvG z{@k$lYx4B!(4W(3)`|B;y=_jWD=W9FJ@Q<~hNPWS)W$ncC7o})3j*&#{(#602QX z>R8!eXx~O%PjB(PTaCB4^$;&9d$*OsYGY4#vU19Ybx(IP8Ox?_9s19^UHexx-W`E= zr}6G`*<7mqyW92e9^bpyc=u`l?*DuL9w>Vc*82BQ*uTZ$1n23l^{bGo%nEAjYO;{Z zR}UU?S7OvM6|vlS4+q{O#(UHy`7ssoxD&C$_nt7`ldXIIRM~sF6gFDd`*b0#Yg^bd zm-$Nn4SS7zM%#JTc+Umizm502>+B2K&Wo;{|M=cZ#(P;i`$}VpE&J7IXQcYDzD#p- zO1G!z(d9Z{89l6L?lo=Tb>qDecyAi-E!V)?+Q2)mfp>lHJ>#uxDY5ImAJtts^1>B$ zD^o-7K~YobWTATT#tRZ7KhUZ_G~P#n_p$LlaXI=_tNzSY{kiXbVZ1L@`&W%6_SmnZ z?XCXVPiE5g*)N7Gq28!}_SLrNKkplD@mu447kJ+r@4v1yKWK~pb1nYpdp{ZP=av$C z=`T@Zl|A2B5v}O?Oyz|RH3Olhrg~%Y%EahjwesJL_j};|VZ1+GcmASs(aqh~FAXqX z5}=XQK)NPviH)Vr%hC=K?Q6w(`anhFfp4-)Y3xSz(aaTz{iMB?>v)EAAR1X645Xvw zoj|KYIx9oU8bl{W*DPPw0*!P5TS_~Yr7I+oP@m&`DivQ}^+tV;JGm6Mk#1TAYP#E+ zdw_wgZFx`7rd)a{Lrrg@Q?m}tmp-78zMv+iAF;8tPg&N5tlIJtr4E&*UbST|-xPlW zxz1PL9#J7J>uD*3tWPwu0T@Vs%LjliEd!MyWDwB_84UAfL(s?&u%$GyEE_@U_uSfK zD9lZEX{aqq`9dL8pA~hc`cE3Q2E+`t6%PXg+1T>opv%f8$`GTT(23a$=1USZ(gbQ& zQpCp6^s+QVR_%ptvAK2u4j-~?G84;-UgxX5x4tS-OlvWeWQazxU?4fm^Po#hK^aO$ z5S^0EVZMw6jcft7luj(mmXP}0I7$o4;-2YRR#+8dOZ6HqDQ#pctpF`s+j_SF1KHN{ z?Le26?UkWr6wztf0p`oUKqEVXnw6c1jis~8vNL4Wj>uf8TR}7Vd~E;7=i~dwnxUv& zv<5`&N;I+?7|8CH?*Tedqm?0QPofiLV7}}H8rd6cDa|R%K9Fd8F3!^}q%wQ*@^F+g zxm-GK%(;=SdZQL5Chn_6Q8C8WSOf#v&+@UL8>ao0q2d6dQ*j{7mxDkf<3LT#cw%E| zQCTKHqMfyPRm`Jja;aUM&jCoSbj0+RLxd~syRfbYA(!|OF$#@Ky?;!DI_u%#f$8d)V@RQ zNy?^@F(<}#zIxU^^Ah9dYe__0W-D9(26DOOSAcGy7Aix;l|(1vDwr>eKqFU!nwo2f zYD45&NMsI}$C1&=&V8>>L+u?bi{q~!u5MvsuG0$8bUo3?4PYQQT7DDg5_7XMG~GgU znr?;pvKTb71XPzIw?QH;#hHIwVzSwMJTcjPzWVyuxrvFlYe_^@ZH0G$f!t~NU7$R)>D^sU8 zsuxU|ohZJnr4aH8(a5V{Ag@{eI_R?UhBAb_NpwQqg8A|`XyhHxnwNJW_2y;u&?Z;5 zY8V_n#Dzj8=3IJ&DS1zeqhzJ6^?fjq4=n!>bSe2r8A?7TIwhaLeEAeK@)@Wp`J8B- z%NLM3=W=uXJjwEE6MJ#h8`U=sy(7{5r4~cUS41OUgMoZw`M03U$#=?7@;wnHm6gZ! z=x)8rkKFhtuaeEiUMsnb+k&;jJp7~;(E86rBfo%w{A&4cpv%MW%4q!$qRYddFkk)x zjkMtdl;)vYh(^{016j}V^+6|N17*nQPjoT{zvXHnOxbj+ z&rH3-k=anIK+O(Cd8l_v05OicMM!B`KnjW-ySn%XbBxq}`MuX?LQNvPK@76i=tw0TjM@pAp2TA26V$zRECQE zh)%^=m@oT-Mh*ZqF$WT@PdNw@d4oE?oo&v{UD>B$;6|%<0}nqC_~1HL?@#R^W`Ma$jPAfBQqdT^Y!^p=PKLOeiX>2 zbG)rTbav12r)UA%KGoK98W_mwmIt7liDxLI?K6q4?XzIMoDCW|2hsPsG#mMq;zJA5vI$xbW^Rh(od@X~F3y4N81Ou6A`7F?-;38$ns1Tiui($UZ293-C zt@)S>iJB(`wMX-v?P`x^CKKP^`i1lF5-pCBdA8O|!9eC)ei`TzvOpP1E+;xASHOH( z2pYK()P!6`wB}!`!UYICet>ut%4bjN8U?A67em&@ta)UCY z+(>j%Zi4x8Gic-%(0Y+uA@yFQ_Aq1%+4`SGv(0*A{KY-TFV-TozQopa8yLv#mRCWS zh&zqp6XNXS4voK$t1C9I})Z9EzwEp7-NMrzt zi{kzxU6~m~AAqx|bUI%>X5o#A;)_}c9seO3c?k^UWy@ayosL(Pq2o28)A2gYmp4Eo zZ-Ulc)il1pQlzdJ!@&y>kmzIA8y1aa? z3?<(Xosw^1zI+E7`5v?ep%X2#QaD2ikoYyY+d^tkZWr7iq${V)P~uv zni_~k5?~;!S>6_On%XHtQ+uM*)B)zp>Y$O1pfwxrJ>Z<|^Zs4GtH%T#}E})z9w4#c!%bP_Y@&ND>UB$?_EFR5U9? 
zMVjbTWMIBzK_fZP+LAmZYMp_JT5C?Z?I-*wkkcFWE`DG*Fa@mwH6v`zn}dOjw0sND z4a}CxP_q@$so5Il%Qm2qZ9&b+c0_Abwuh{83i=_|9w7;pW%HcV(USzR8`bUat0an} zv>Z})AR74>7|4#6?*zKU?5qqayAYj}U17fL1{&ENv}R=wNWEE!9+OnM*{-@_^Sny% zy0!A2p5sSr5nA8V)?~my_Og6$&?RCYWwgF8(X~DX=1UPYvLC347)!JUWq(NI4%D%Q zQzKjH|K{LOQkO zrh`U~1T`NX(fSmD#J-Z$MuE+@-Y(@*dSk|;J;%0a8D#iGBS(RO9BuhApp$W|GGrV_ zbTW>I`Emki-gV)(8!IT^&>Yy>itMG6PoRD zjoLz*j{l;yQJ9jOwF0!mh1j9gRNnaBDYl?z_#HTG^Tf{J^HM(zazxzF&FkhYq zjXVQt9-bvy|M46o+VSiBM>gN&{73ZpgBvmbF*F>Ve`^&TE6aWuK7gYGGOiX-3%h205ZB=i9fxK<`JD}_ByUOV8 zdqmgUl`vo42aS9HYHvRzT1)W}B-+%AH`c$g@Lpg4#=<3UEH}KzZ!aHfA#{8~H1a7J z$Y+*+4murQC_~4WM5p5`m@i+0M!o^9srVKWwO^O*oc@H$DY)oS;5zm#dsvw4@3aQQ zd~YlMFBr%Vmj4fQlki7ni1~@=#QY5NKC8YY|=RU0}X+1&wqAH3{8`)=%_+ zM4Ni?;@Hf{Co2<{)m{hGZBF}wFFVK*`*crVgXDVI%LE&PY*C z79yWxiAMGZIa_1-fuPIhLCVPIIHJqvc$hB}KqC`DYYZkqqV}2f(I!yHG}}*nwRz8t z`gg3&!v0RyDo``U)_gD+$RU=OKsUvvDnreoM5pF3m@m^nBZq^Ul_QAO8cc`$vo+vV zum1Z<|;$R zB}6A<9?X|ZK_l})P0nRR>n;{RqTSuOi)dLWxT9my5?aVD-M4c?HBr1=3!&o*qLGCl zCu}Ug3UoRaDMQEAM5p5#m@n6YMy>;`ySN?_wO{WpGR>{NC2=F>u!xeYXOJE&Qy60N5Zk! zyEa^zDBh{XP;wX1$Wo94HkRK5y1d-03?=swos#=uzB~XLc@VS?;~~gDI*e>8{+&3- zjaXiq!@MlhDp0fB*8DIS$Rn0N3c9>JrVKTY6P=nBFkhYkjXVizUY;Ua$MG~|)t0ZH za_mnm;dIG2Wn!*auTx9=_G;iKmS?maQl2Fmc@E^1jpfgSE-^1CL&}RpC*?mdUtR)@ zybM~`@d~8gbwtgllFoXC>n|UQn`4PdhlzPrt3b_bw&vHtK;E$YP0%IgEoG>Ao9NWM z1M}rw(8znBCT1nk`i}P@G2ao+mwZ#fZ40p*bG!6y=sETSErgB_iAFvGIcQ_~C!o{u zsWNnYMszwphxzgaXyi-KT8*zDHLEeF{`-AC6+cO>*ZHNK!CsJ<__Y>8!8f+HZ^1ym zv;2F|jmv+Pq2LFiQ}92SFF%4tegZWUKNGFb_yrP~0cT31A!=%MDmKrJ^wJgm8|Ej9 zziK(8{6;kLJIHAp%l`zOl)scAr44^z>jj!q%9qf>%cg54|3`r zwAQ5`q~5wjuL>%S~hpyp)=(K?rnATj3>Jt!%C!Q{_LYY#Lx>bK{7D63KHK-N&AkzpW5Z!8}UI$4`2 zL)NB5Cu=j9FG-Ly_@K2eDM-z_=%=)pb;+kVRmO+GD0N&fow;$toW%HMEs2PvN>pEB&fOBf@qz~mXOFR)KBE*Ro{D})NmuV))m6kY^7D8 zYHOmAZ9q=nSiT+TQnS4>RE;7!RXe~qh!1iQAGFS8CrG5GsGs*@smT@c@jpz{8%y6# zvi?upS&O1#7hB`5U?96$zB}l0vxhQNj3zo2d%}D%ppm^m&CT9KYh3n$M8=`jiB!%k z=^2o4kTP5^O=lYxCyM)ODTIt68YzODxv_jK=!ERA3?T;)osa`z9KHuRd=FaZG9D5M zsI!&4S;ptBy-`1JM}&hjL90N`L|gMDkfS)3PXXQF9IOmAhY+2b63myWppipCP0L|K z>s+Qm>OPDtjxRI0_@4{sQ@Lb%>HN(b?n)F7*Mev{f@owq$YC7IJe>UE7MZ%;fMq-HH+sLr_aD385#7fgrlh;=Qu}_J$Gev+ zl|rsLTWZPGroKC1iDWTbtWE9GYmKg>x*|A!OmNU)IooZrUDVjcLfhTg#Yc1+(G=Px z6qc;**tyT@ORZ&WkHGdccIi>w`OyR!yZ`7T_B&k0+GWNqGH6`qaa~5ustxb+Skg3G zdtrLJrrB(zKB?)tOnKRHv|7&CUZGvy*xt077}^!I)rwQbX`ht>+sD|xN1k9;9yBf! 
zsp~+C#}ibV^R=b#@4s`q<*aQQGqee3`X{b-745aEvHe2Z-`D|utby8VHQ#IXz^-BJ zn&TQnT%kiGjY|32i_`lzqN|khmAYBWsX3A4lbegfX&|YBd zh2H-~D%$ErFAnS_#$Jm5%hLW?yOD4%IwmE{sg0{Z5I-56lhbbm~Wg_~QV%_wTz+>)>72F=ct=cw{E#$FrR>x`Z2r?_5~Z}7@D z2KFXnZ)S>HI+}v*h$AbjI1(>sv$Yr3?UlP#wQn=F9@^WDy~De{Q?=W?_FaL!+t_;= zu4_F9w@ba~PX@7SkqxKEkaIHSdsX&6WA6{`1I9k+EkC5P4|~~pfqlf-N3s0aUo7V< zEyZG)y(qHWSkBh^<$4t!SMB-6J`vg{jeW{nepineq#&`=YTgh4y7*U-6z_Ro&OT?(2bl!`L_R{8k5^OU=bv{|Wtd zcXi?{7uz2;^%Sdr?AzMw9b?}O?Z1qD&(H9__WHp0`Y^EnHuj^&47J<_Zg=&vwS8KX z2}&&-Gv_zY$TZuJRrV8OKMn0?#(wTc_(EmB^s-+C_G@FmVT5nfBj^Fg=aH>FJi;At zqQyeK{RE5q0Xo_5wAJ^<{t((9js1_G;J@1HC*SJl!2V+Fuc--=F8Nya6?fS8UGn*2 zJ-gwI%og@H)&1Sr|AqDsV;A@l{zO;w^zWpl2^L5OG_nYIV+xmWZ8B=VRq7wbcZ&&*VNs01c@I>DvEP?m9gSpjppg|o5?PVbK_bPXI~p_+VL5e7TQW{$B{bAbA1B-w3}t1^_OHm5tb|U)-BG4+m{^$wG6XcT4oD(;9hpvqtJ7AljO6?Rq;!R*PG}O%IPIwD2lr0_K z3iKIltqe2UP`#NEut2s2jcf-d8C=ovCQ!;Y`!is?Q)HX-^*hZqndP3!^-WLMBg6(o^SlyoAEU9ec`%M5;G&5-gCDK_inu@;HT(&Li1ra%H_MN)pMH zQ;94SCvqwpYUVU29D<=tcKmeECvt`|%uJzrGiSmAISVv$Hkc&xcRNi>Ay;ZHSM-v@ zok^1AV*T@d*=f#E2~3>p1kVFQIp6WApig9)GECH{-oyp4KrRH0Tm+IxD`nwCqHe|J zieB)XR8E#t7d*?xiCm0^nz_UYUkZkDnd8$zpUCCPFf)Vd&CG-aas_DQN-#;J^>4P3 z*h4A4koF1bj$Pk<+>A_hmP%k^wiCPx4CQLa=YT$uYm{N)TBKzLmB7SZPVjCplzSY%7xanTrwkMKQ@x1?V1Yac8hHpLk%uWABvRs9 zr~Nj`dU8D<8>6&${SAhO)!+V9Df`1dAy?xEALXhm4Cqkc@H%5KA7b3 z*P}?oQ89V|5bZJg0H8LJW5@?8fQ1j8+`qw4K63nH(5LZh<7`7fAE zJ}J7>j^g*%84Uwmchna-aZ0B8lm3Wle|8eTfT8^A_-~+}_IG8p{U6o0{R0-r0?^2x zAk%i@8D9s}mi&bmcS4bwV(tB~_sAxGp{+AiZfd|#Iy>G4^y|8-GFo?|`qqoW0$B_+ z(j81DzBGEu7B$JWzBsf=@3-RRa;5&|Bn|+J>#yj$gwt3O3}q?Ddw_oAp33OEG}ZTA z1{TP&ppoT3M(#yPkKEX}qRWW5_d=HuE5@sLc@)%2ZzsC~7|M!{uLSxC`Y6LnU#hpV zGAxi)KqISyNd(jWcK7GeX0BA=*@C;-Xeq?YmHNYHT#~8wQwdD;cY*`JPzE}_8tC&_ zT^T0Upn4N)!U7ot8d(b@kF_c3Jal{W-FW)U!x%jP#LKAzz)EoH`;u-6WRm} zWmCsD1APFSE2H@qRNs6{SRh-0Mz#i%0OBWZzD>TRap~mN1IxwwIknlD>Nff-`i^iK z+k&BN=lJ%ZA9@F6^xcu_`;LSKVn8E1fegJfB|UU9B+vK!{YE@Bf6@Maqi?(!?1G58 z+0`jm!B9pyz8mOM*j*WJ_MmzM`F%&vNRT zVC8uBgVCVvK4-aO{^2ZX+X+M>{?d^y~TiZrC3*>mv$O$0ho=E9n+it;8_D2 z$GChYee<|#yn-)4LcLt*bT0x!X?6T!&?j(-GQ3<$^rE*J-bB?=tOqNqOkNx6Au0uo3%yq)ogQ48u_>G`XaR?x_8 zV3J7fFNv^{B2Ufk4Ps-7N8w#>yfjm-s{j^mcXD@tq1@?s8|d@6OBojKrg{tazyi4! 
zG;$wE9`{qyc{F-w;|;`H$u6{k^pE3s015T-pwoQ_4CP_R=Yc+sN0i~^QL6Xy7%Y&- zK_m0QB#xPXiKDTKvSl8M>C=`d5SGjJSL<^!)hAQ}6Hhw9r@&C2cKjL82lA{kOgu;R zCZ2}{@&ah&MUX&VqND>!x^sx~_eoJA9HQEv4+g}Eyo`pLd4zZ=tl(dkyHQJseb}8 z>ZgQj8VU#`cc1y1@awe z=00#;ruu_QVB$w7_#ZHo|2qB?=vV&F$}sT@)tmSg7RYa) zk>5cQ`5&c&M6#u_CX(zOrP@u8_a6BNztGkToXnpf??ITsNE7Ji&nTnyB2?eHGc1rU zppmX%GXIs)jZm~U*#~n!w%gvbm2CaCdDmvD-Sk)VUDRnT28Pnz@x?(u@)FAEyCl{3 zT?!UR570sMioXJwV*D{_g%JtQr;E{Y^Rlvi_PHz=3 zlvN$?2l_nvE5pM8s`oGu7RYL#k<~%+Sc8(zqtPv!=c9`;`T#mU{O}j;_lh;*MAk$^ z-3)TdYk|D^aC|W66B(imH|tQno1w5k)&-4Z!6cE{ETt217izqOe4rBu))XRoWcO#HDBaROPeIUb?;bjx5_p&K0kj+3Nn}bOp(^*Of(y)}%4-MQk zm5ZamDRb{vxMkQv1+cKCliLamWoyT`0eu`Jlwo08s<*HmERgL%BRhb^u_GlNhu%p0 z@M}F5xaUTGDWCopWKf*ONEFnHak4vsycKbL7tp7%t1_%qsou&cSRlKBMs^32G|pqG zT^bW!*BvHul&gI(ctEnXkjCgYS~Jx>Q~(QmI=Q{TQ1*6wH0aaVM;R8zP`!nHVS(%i z8rdHtjj@z;8jWVzvO9@1)&ZIuKpa09^z$F0jOOF1zWJfB zKn??q91bS)pA$bz@v+C>JVXiTHxGl~yC74Ypg*FmbrJ%Ga)jdn=x07s8EucE`nE^I z0+|RJIR<3rV<{cXob%stxXWs{{qY?~`$NRq@iuTA8fxZvCwv0PYY)dy0(}N2E5pnr zsyA~AERa({Bd38$1~;?RZX1X%tKDA7TM>U2iFR5{)~9@QXQmpe1STds!PCJ|&TxDR z=o8VSv6?uG>P?&t3*;Qo$hjbi=oXkxq}@KT>V$UtL^gdA864+vJ_>4Os*{}t^3ucc z3qYU8h03sU5!G91g#~gkXyg(w$wL+XK96!sCArNYA6?GZ7yE*TOP8tyCN6V=dXNd_ za>r+YK98BoFmVOdo467d$Slx^9;wOWDoQ$!WUt6aFLvWYXg>X7cSxMb)o7@hIZpT* zke42gUkCa`<|@O?^;B=>23R0Bf<|rvlSFj2r4xyd66Kbb#^Xobg;?hCW79XaOjS24 zHF1j*ycG=PHplCrPvmxGn7D)LP233!qzyE37f2#^Q__jBh2%=5e6b~4>v6wsA<~S> zk`9DT#6?iyYpv*kdt*8Ez

O$@6G%jsMoWLsr#m{MT3vNzax3oV*t{YGH5gHox$4>YYkarl4e+K#${<$*Dd_nbQzJvwx6=>vZFbP2q z7Rd^)`9q}h(2|W`jb|8d9&g851>!vM`f7!57nFaFD#Is zKqEhcB=QR-od~;M)V-kJi6?si@4DItl8poT6$$n7o74Rr7R4fs4*gu?rYVSI4`7K9NP0;bJkW zchMad$l{=pB|s8cl9Em&-UC=4(Fag*LJPg5&Bc){g@&5x;e>mFyasW68PG?vtTN0j zNA+fU!2($xG}0SPBGFS$I+A!TRd^!mKZ%Pr+e*IJHnDpXPh(b42~4c$1Xlt>>En1` z&?mC8GEA&O^(Iz@1=0^R(jO#|0hDwii^j)<+IQL2I2=X2`S!AP!ctAWb{NPnwDoFE zW_6I)9*(aG`uPVbqxD);-+FCWAcH|8L%_!T@i)8CO3(RkcH^bCw|X@31>8FN3wjQ9 zzh4&&CF^(&^yB80(X&AHJ&UkFN}!Q4$hggv^tegb`RK+X8aSW6u_(mrzk-B%X>q#i zfxP8#d;`$Wzo9a`Y(({5HiiW<3^XzvY|KAOuK?2PKi(!QEd}>BqWyB)CcTP>773uGJ6$Ow=?wx#?{AjML1iGwEJnY&|zUrgUy z6yrd)Lqff5?{s$nc?073NYDpjl;LG3s`s)pERbD5BfEl0AbRk}SoE?2Cq)+0O~@5As^X@dH4g$brf*Gmh%b90UvGV9>}RV3LSleI|*t z4qMM%H$*1lFXUoVd?D9%>ncr`XR6~>1RIAs$-}@<4tIP4=rgg(upv}$;|N$F0chk% zkW7xEq%&~`3C*DP{nXVOn@FLOKChPJK#oR0olJD9$AG*zar`*Y2XeeJoSZ=QPELdc zauR6dWH1TjubW6>XmqPTYPmJUchGI8uC5!%Bo)BIDNgQGFqG3A4?!QsWMx=5o$4)| z0SjabXyiyI=Ehl7yBB#n(f+L39LI4s66)m~r+Y5QOBKh@2YnnXCA zppRsZGR#~<^=7Vx1#%r|WGqDy0t@VrE zP5z<&e5d||Q-2a<)TbyNjq3mNLvnyv=s!QK7mxZh8anDTPWV}nmnM!s5BgDGP{yb) zQvIke!2)?1H1Z0VjQaOSOdjFp3WY3>5dFPKW4Y3nU0;uwuc`<(UUQPKgQ2|P_?w_# z{ckD5#@keH;~iKa?}A4D1(L~olyoLU?c3$tf6*k8crksDSU-;BeI(S&2Tu1xFqD5g z{t@US`B)iVKB0OqpTYw93^ejNm_+i|dy$5vLj1_iTjBv`+eO2;?%MhbRlviSPVXx) gl&>BC2K0G+s|*j{QN4%nVS)Sr8u<}Ca_rFm12OZEd;kCd delta 16018 zcmZvjcbpVO7RQ+pkRXD}qKJVYxFG7x&g=wH#DIzz3&V(M0nt$r?6+-p71TyCf`cxg z0!9S0qhb!+DP}$I%zCGHdge3dJH79#+Uf0H+&_Nr*Y%<5{pwY9)$5w#lV3akKD*0^ znaRVwPW3HbqQ2C#Q>RWR%G9HdY@RYhrZ#)Y7H_RmX9|z?r=H+-p4WeHqRYJP2iJ9- za^#d5M@^m5++*r=B|W^gOV_pT)I0HH_l80?lgni?nTj`P$U5OuCf!i|D7id2tXruo z?M<8HA3Htmb)W6^Fka8VTgQ04#&;TjYT&IqJ`s4mD=QKM>iY~g-g>^bzVS9_E_J8Y zG5(R$CQY6?t=a3_;?pOTG)Zz_p55tglw>@iaGr3e{|2cz> zi#(^i#H4K!sMf!l^|nwQLyb2q@U}Ewz0)yVb&PO2w(`A^#@o7ijj`CK#oM-A^IckM zqaF6Mv9Tdld4Icsv4U#%Zp)GrQtB~nyoSKb7%%Ih+^8ON&ST#93dSpH&$sK?^HD9{ z_U%32p}pr}z#H-!@N%u=4f#wi+VQ08eWAq+sE~KUv;;ibGN_m z8RJdRULVl0*9W$E2etS5;PzgJp0l~eTxHUTfnmS3AlmP>YW*&$kVA|&G4KvG-eIoa zhpUhyoRA}ZugQ3m*6jD>7H>+q=DhOqGwuD(<{KMg{nmnNX8je(gO5^oQ;m0Y;2mSU zvg`M;>h3t_?s(suX1o)$-_zH8;b*jXC${(8YwLOG7CYO*F;Asi`}9df-m-KyTRmvQ zyOU!@-8UQ054@9%cd~PTin>45xj)VKPB-2e9pC3OTfCNX%^mM^80f}qLskPFFIcZ% z-N9$6tFw(aGw@~^?;ID{bJf*(&Q;)hvyFGY_Vj`^dpf7ZyRf~d7u9;YB>rBd3LR#c z7F1_#a#M2b#p?eO<6Rnfml*_r9f4TEt@xA%RyP{)PuWa#J%Qb(su4c0|%uB3Y zJzL+UT0wM9b*as%1?qaC@fHQ%RmQv8b^RK3z1X>4;(JStcdd5)x{j0k`WEko_Kx3J z>v%7gs8utQt1j#@@63nnx`u>&zL2Us(PxmI=55b~s3cv>_0PLWB`q`F&4IVvcq^Qw zTU64mPSS0@cf0ZKSaX`++2Y+*uF0reJJ3yY?SCqrw$t3sJ6BL$JZMRB!rkhuYP@>_ z?_T5G=R&_yo!#%8J>Yu}8t)o|457XXnX%3>+o`GCkvTE`1Gd**$*Z`k$8 z(a)>H7mW8};JswLmt9Z)q7Gki4*%+Ve>2{z+SAwS%k0;$hudOlcKck`UKERwJ)%}n zePydllKD5(&6~!1EAZYn-aF3CyXxle&doo3?>*za-&|(n{b%SQGJEqmU6Yu-hqujM zTM(Tp-P}y+cm* zMUsiyCTquug6bQo84V~0(*A(M~iEiGkO!g zX@z`F-!!|<>EQ5OsN9+Dd#H5PD&1$x+E^pqz(BfN-UGCqk)FyDfvkhIT@rnteOVVY z(i?0p@6jTCAffltf>`(R#fEGl+C5uP{l0N=a+0j4716N1ZEpiGkiM4Jfp)$~KV@jx z5bHE-1oNdoXk=qhBQXG5Up}x!Hi3lOTWnle{G#h%zDPlITs2G8KiO29K-FegBb$SP z477X@=pr*%8LEb0ovJNhz6=G83)76jCu-wSXvju(0v&uU zsP-DQI5|b?wK6(}+crmlfox^@NYF)RYh~!rl+fwe7UoL|G?E52It|$R@{AVAK*9~N z!$=hx8r{O!E{=k#8M7o=%4$8NG-8e9z(Der7eFVas0=CFVV#swFkiL@jqCt6m(OgG z(U4F=WU3ZYtRs=Bs?UYWm!s<@@2E9!KE^h*6Bx+OmhS?(@z_-v&c|Y%^W9*+><${) z1JpQ-!`7GQw8);2aJMX75Sy`uRJIs@A+?~oeC%b((s-?hl)bP<_67sl$MSta7l-|n zA!UE8lVV`LOaP4>05+GeY>@*Yp@di*3I$F2!~Hr{D8!QfUT&Ipl7qAb#2joJE`fm@ zV);bS#pO_Ch&c@F#2gOucS=&I?6s(b>z(A&2el+N09it3cWvr8REXr?r0 zT=@3n*&N!xPjq?Hg6da$&Pz6(pjA;Z-L^Ob4CF-1JtKjxuCih;=e9g86bWXyg)5Lvtxs(*wB-67Js}H@HkO@?D|v(t_%O18+!{ 
z=4w5p%)=VF91Nsl`Fzkxxk4FIuEaVituS8}fJPRAnm@=QNGO5P3Ddi5Vob ze*ASR6xzP@YVVgTn0H9s#AR9s88>5%EC&NwVfihflX0suWZZ^zGH!?YatCPSPEd0W zxeL;scARThxmspW$mSYa*XxrAQv)r?R==BgVY2CNt%roFZSEd0kb5n^4|JolQW+BN z$2tiQz0FVT!=$g#FHB$WWr6Ct46)SJtnM&!gG|svg4{c^nMn z3CmZ3E--&mhN?efovJ5czB~mQSq<8ROx zU0JlCdd1Ym$)@+TE;8P?P5u)M4`H1Y|kvH28hGnUUF zVY<+|WADW3a0X}6h4>7nAY1+HxNDPLKi691&ZiB?pBl*5JgLiXu+HVTFkij{jeHN< z%;X11=qfr#^C@>)5RQWuMCWMV+Bp2EO%Q;eY|B4`f&60mub>-;|0+Yx|FBNYZ!lke z2aR;%XiEc-z}i$L32Dm{I+xcjT@lY!8gqO~M+ZX-s=JvWjby+;vX(c3E;2b~Xv$-qrUJ~DB4}hg(55D%AfcAhESIz` z43oA(W2R`&U&8m8g6a>a&q+3IuQd^|gKcm$7|4#6j{#j^c2b6jov}{DE-+tq1&xda zH88tjZDO)JB+Lrz@)7Pl9O}kj9WAKlXI`5u?V+_0G7f8GPcV@2mhS~RA$u!B$Uayn zWM7ys`+-LG2W?tnAfbThtj#ran6+9EowXa)#%6*xftmws%Ljsi9Ax>ypc|W#GSnP` zb!sNUd^r>}au}##IUH*flOrIp#H4+~*n{^iYlnro4hQf3Yod9yx{%Dvl$ACu4piM}Qg@g%7W!`zK!xTDSS^BK|T}R)Qd}AtGof|AnHXWxm==<@u zp=n?sCs;lmbR#iC8O~3{I_DnD7XghlgW7i=Yg3VvAmQfEh}1L`GnHA{FpDo{^6}$= zjcYwWS?eR|6s(a`!9Y&4{B+O_#~I3ybSBnGYJvH37HH&b(B>gCA)%B%I36gba{1W1 zq6O9WE?k;ynx$1yagJ^ATriOHEDt~znAyruaX!|mxB%wM9MH&xpa$k5tW85MhJ-sm zfvLTqMLoETe39yN?2B|jEi{*C3y8WDYveL8khzx616^n?SB9tx)`^-A^W_TA$d#Z? zLs}uBn9|j2q#?zIwqxAd`)muUg}K)!Coj;t$XIBbTm%MkmE~81E;iRFL&jpPld%Nm z%TmzDwV=l4I;>4Yu7`xVL1}Sp6E0>toZ#7lYUPR>lcgK99#U?^8o3D!WSQkRgHFnF zWk^|pby9AD`Eo008oT`LYr;azChncmQkDkq04RDp2`)VR)`yY|!(XNGf6rs$VU-AX$1y zE1~0ItdU2+KpwUHG0;Wfab@Uu0_$|Fg8A|%(8!-bn~FRM3H`^SkdFV(QA|@1T~9Zy z&C;i|3Dm5%Ek6wg@{Hxrf-Wl0DMQWkSf}O%m@hAaMqUCnDlcPgZt@pM?A)aN-N|L6 zDLgYJ8%yCgtEqZLn?TiHu}1y|2J))quYpe0>&j5|2G*&16XwfXppmyho144?Y0phA zvO92oNw@A-WF6-OD|7$eOIe?uWWGtNPFn~0gt|X2pah}sGa@@Ym<(TA>l?J zT`#h1tdY-_BG0itZ(i&3C)xsi{uFEEGcb_PE&l>^eg2O!`uru<_4zB9FJFU3z5#94 z@hv12Q<@X57vVspGV!m4A_dv%={L?zPX12oBIA48C3*2(x4 z=F5LUBmV<6HoswQ-tjvm-1bYA_56qy)`t1g4K&gnw3$Z_NGKsXJZY8-h9``SJ|D5XUsF%5fbVr|JH5a_*0sDh z=tiTDGJLOxb-ve!`LY3Mq%WvpsKeSkqaP&P@S_^Ce73Nqgi99Bsmfiq*G=3|Yan7H ztdag;ARAjg0CXZYQHF?3u};KhFkdzYjSK{BwlN42Ixo4OV8hKm-P*fPBFs37X?_f- z_Pz6_WYb`+g@7Tpu`R$rhFU%hbR)2(G6d9Poq*vmUq*mNwgNQ@Be6E)*cuXU^N}^H zP;C1dx;7MAP+9k$eiOIR8i?2yYa|5*lD517bWzACLqrzqL^Q&D$$>`lpv^G~kkEN) zerzTdiun#l{#sC#d#_416}27`wzJKR0t4CJ@*O}IhtbNAup`z<7z6WVC(y{wpvGYr ztW7X>g@k*0bT%}&Bkx)miuye1fqoOmY6UdxhBdM~7|0%$j{{v8_Ed(3@mQx}FPJZT zgGTlNZ9cItB=p^Ot0@>f zhWT<0Xk;;{!C8W}3CB`MxW`9_ET4}bA{VtF8k;R^vAI^8K-G0vBif-rZm|4D(8cB^ zWvE()b*gTL`LY}|vI4Xj$1RXhP2>wDT*Zp1_z8QFf^0SYGG8dSYF%X9W}Cbn4CD^W z?*v_N?ox(~yRlA273Rx5ppkn)4bFX7n{lj!gxh`TGP|m@?RM!@x{_Erq)ix4d|hAk zYHPA|zt%y<16U&uf`L3_`NN=-@rW{HJc@NP9)tPvIB4Vv&}JK}AfbE4CR{;sv7cXx zxehX~vuSZ^9dtmqeHI zbSl*tJ7jVN)wkZ7pKN+d>(Sx2ZFBE{fxK(^-$57re<-8F?_phs--r3~PteG}K<)4c zSes~k2nqN4=nKLc7W80Oor<4ezWfXt`32P2{ED^7$A2MV0uY;^`O1EC+^s{q`{emeIsHLh>3>=Q4ZmTH z{0;`viCfED#Q|L;lFHDq7S?I#4D+Q6XrwD>laI9_p>I9+3Rm@9^d_KnuQvhbf4DF? 
zxtmtPe|Otf4=|9PmahZ481zzx|8=p>e{YyCeLy4Yff|GLu{QJA01|HdrHk!U4VSCN zTrT!=N|AzW^}|mq$x>gfhm<<3k$xceaV*~mbW-{&L(0ZjCuIQ4mrXz;n}Rm;*bEX% zhz*0iVjZgCu6JeDC;gf>*ME=efwq}JU?77n9|F2j*g_euhhm-UVK85|1dY^#8inCl zn|X|YggbvF{bl=TF!Zq_;i48)Q(rAjmbTJL=opDLvNg!18_Tx^T?kUj(2>SE9StyF zGN6$xXfuySNa&vsgnia5G)K12(-shuw+$D-K#G=c2fCn)QihoAu};hm zFkeQ4Ms@@>C}XfT@z@FS$B9QF)8Sx63!-bsHZ@s0Ya7Vg1#4tikPA7M?*=+qyDLN1 z9#|)99L$$JK_lZqn|SO6X-_vnJ5w{K#=j{N#( zcvO!Qu+HNFFkcP?jT{7OAP&abBttK8hTD2<4Q=46Rx~f<#4)ypZd;4PA=(6Sn20rU zD98mI%MS-#9F9x~4O<3f|AOEwzAwoMrjhpc{{w%J4o5>%5->^W|L7$a$a!BEZ_zVm2h) z^xNOzbgH2t`jfqVj-7p_Ym?-BZ2?giV2#WHxouTo7M#V}0-}~+jVuMZb7T2+pbO6R z$`Ew})`_|i=F3f>k!7IGLT-kHVoLfovZ}IdVC@rzS=)B&u`Mgnf|VcjNGwS=;Y1BZ>RDacetIJ zsfFfVZ30#IVU4T=xrAf+1E34dgUV3#5Z0-B80O0(ppi#Go0dEV3DuPJW6aH)@IxWr zcEciCuyTh!iR+SGAJ@9wTYqgxj7({2yqafUo(7FP18TRQ#oD~%IY_wa$FGO+hQv=p zUBSxMx&-&bp4WWF*oNN(19{8xw?P;Aca$OKU91!HcbG5#0FAr{YGB^S z+EnD9khWBS3u2M~rhtr0>~5ngSo!V7iEEOje`z(8e1J9bA;^6i%Rd5LSUy&Ul25Qs z$)_-1J_C(>4%%eo3rI8>Y0FHRfNj60#0pyH_D|F`{YUHI{!81`S0Hz6EdK^{f%sM# z?!Uu2_us>O`2jTYBdCG+32W1jpCRFXAAK>hso1W|s!l<4cyhJj`9+&R)vs71{{^{Y zWBG5OQ}w$tRCVG%y*O0~7}x4SuGND!`{)b_)s*x!qQinpS{Thfn6nhD++!efwyBF& zMn_lM=GtH&-7N18y20t83>`hOPRBYhUwVN?)&(^>y|FeY=>rL~fM|5m@dIQ&?XwN7 zW41{2o4B46zYppl`VMq(J&CLmiv z!recTyE6juGhA1&vgcL_lJ|P8g^=M`BO^d=)>u9gbV9aPhLCNrPRO<}ZqkF?qz7&K z(EterM3<2)$$R^cUd+>6d~Yk%=4nP-Kup#)+z1Aevpf&FktrxcOcCqEYzOmY6li37 zP@}Q~)@CE4A+dvw@ZhJ>UQG_);YO||U%73fuC$}pK*SiVk)1%U)L6a?=tS(Q3=w0o zPQ-37?$m?asRwQLF%A-DAC>fy)!p3Xk#xH9_@XUpTL(Wf23+tr5U zAZ-Fw2V;$tKyK4mJ`r>gIaC>{4#PTChr_r{4|1Cxv?)guBvkVUDMwx}tVPo?Em(P5 zBTIYJB&~~#$+pQUAh&BQp9;F*9IXr)$6%d|GR&7_K_kb38l2;?HtCoK3Ag!ZaMEq3 zLbd6U)`He0#YEl26SNL8relrF0J&ddxd*yvh%#g}W1S2i#`SrS>+_(^GERYn?n|xl zm-~F^JOMzWER+b I($sDK4+G*?fB*mh diff --git a/demo/long_video_demo.py b/demo/long_video_demo.py index bb7e51a234..eea03348ff 100644 --- a/demo/long_video_demo.py +++ b/demo/long_video_demo.py @@ -216,7 +216,7 @@ def inference(model, data, args, frame_queue): result = inference_recognizer( model, cur_data, test_pipeline=args.test_pipeline) - scores = result.pred_scores.item.tolist() + scores = result.pred_score.tolist() if args.stride > 0: pred_stride = int(args.sample_length * args.stride) diff --git a/demo/mmaction2_tutorial.ipynb b/demo/mmaction2_tutorial.ipynb index 1a9d6ec70e..4d24a04d5e 100644 --- a/demo/mmaction2_tutorial.ipynb +++ b/demo/mmaction2_tutorial.ipynb @@ -1,1936 +1,1936 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "VcjSRFELVbNk" - }, - "source": [ - "# MMAction2 Tutorial\n", - "\n", - "Welcome to MMAction2! This is the official colab tutorial for using MMAction2. In this tutorial, you will learn\n", - "- Perform inference with a MMAction2 recognizer.\n", - "- Train a new recognizer with a new dataset.\n", - "\n", - "\n", - "Let's start!" 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7LqHGkGEVqpm" - }, - "source": [ - "## Install MMAction2" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Bf8PpPXtVvmg", - "outputId": "9d3f4594-f151-4ee9-a19b-09f8a439ac04" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "nvcc: NVIDIA (R) Cuda compiler driver\n", - "Copyright (c) 2005-2022 NVIDIA Corporation\n", - "Built on Wed_Sep_21_10:33:58_PDT_2022\n", - "Cuda compilation tools, release 11.8, V11.8.89\n", - "Build cuda_11.8.r11.8/compiler.31833905_0\n", - "gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\n", - "Copyright (C) 2019 Free Software Foundation, Inc.\n", - "This is free software; see the source for copying conditions. There is NO\n", - "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n", - "\n" - ] - } - ], - "source": [ - "# Check nvcc version\n", - "!nvcc -V\n", - "# Check GCC version\n", - "!gcc --version" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "id": "ZPwKGzqydnb2", - "outputId": "27506fa7-48a2-4fe0-d377-56f940dafec4", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Looking in indexes: https://download.pytorch.org/whl/cu118, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (2.0.0+cu118)\n", - "Requirement already satisfied: torchvision in /usr/local/lib/python3.10/dist-packages (0.15.1+cu118)\n", - "Requirement already satisfied: torchaudio in /usr/local/lib/python3.10/dist-packages (2.0.1+cu118)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch) (3.12.0)\n", - "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch) (4.5.0)\n", - "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch) (1.11.1)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch) (3.1)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch) (3.1.2)\n", - "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch) (2.0.0)\n", - "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch) (3.25.2)\n", - "Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch) (16.0.3)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from torchvision) (1.22.4)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from torchvision) (2.27.1)\n", - "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.10/dist-packages (from torchvision) (8.4.0)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch) (2.1.2)\n", - "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (1.26.15)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (2022.12.7)\n", - "Requirement already satisfied: charset-normalizer~=2.0.0 in 
/usr/local/lib/python3.10/dist-packages (from requests->torchvision) (2.0.12)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (3.4)\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch) (1.3.0)\n" - ] - } - ], - "source": [ - "# install dependencies: (if your colab has CUDA 11.8)\n", - "%pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "5PAJ4ArzV5Ry", - "outputId": "eb8539a0-9524-4c48-f3e1-0b013ce0d344" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Collecting openmim\n", - " Downloading openmim-0.3.7-py2.py3-none-any.whl (51 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m51.3/51.3 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: Click in /usr/local/lib/python3.10/dist-packages (from openmim) (8.1.3)\n", - "Collecting colorama (from openmim)\n", - " Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n", - "Collecting model-index (from openmim)\n", - " Downloading model_index-0.1.11-py3-none-any.whl (34 kB)\n", - "Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from openmim) (1.5.3)\n", - "Requirement already satisfied: pip>=19.3 in /usr/local/lib/python3.10/dist-packages (from openmim) (23.1.2)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from openmim) (2.27.1)\n", - "Requirement already satisfied: rich in /usr/local/lib/python3.10/dist-packages (from openmim) (13.3.4)\n", - "Requirement already satisfied: tabulate in /usr/local/lib/python3.10/dist-packages (from openmim) (0.8.10)\n", - "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from model-index->openmim) (6.0)\n", - "Requirement already satisfied: markdown in /usr/local/lib/python3.10/dist-packages (from model-index->openmim) (3.4.3)\n", - "Collecting ordered-set (from model-index->openmim)\n", - " Downloading ordered_set-4.1.0-py3-none-any.whl (7.6 kB)\n", - "Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from pandas->openmim) (2.8.2)\n", - "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->openmim) (2022.7.1)\n", - "Requirement already satisfied: numpy>=1.21.0 in /usr/local/lib/python3.10/dist-packages (from pandas->openmim) (1.22.4)\n", - "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->openmim) (1.26.15)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->openmim) (2022.12.7)\n", - "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests->openmim) (2.0.12)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->openmim) (3.4)\n", - "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in 
/usr/local/lib/python3.10/dist-packages (from rich->openmim) (2.2.0)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->openmim) (2.14.0)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->openmim) (0.1.2)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.1->pandas->openmim) (1.16.0)\n", - "Installing collected packages: ordered-set, colorama, model-index, openmim\n", - "Successfully installed colorama-0.4.6 model-index-0.1.11 openmim-0.3.7 ordered-set-4.1.0\n", - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", - "Collecting mmengine\n", - " Downloading mmengine-0.7.3-py3-none-any.whl (372 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m372.1/372.1 kB\u001b[0m \u001b[31m20.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting addict (from mmengine)\n", - " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from mmengine) (3.7.1)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from mmengine) (1.22.4)\n", - "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from mmengine) (6.0)\n", - "Requirement already satisfied: rich in /usr/local/lib/python3.10/dist-packages (from mmengine) (13.3.4)\n", - "Requirement already satisfied: termcolor in /usr/local/lib/python3.10/dist-packages (from mmengine) (2.3.0)\n", - "Collecting yapf (from mmengine)\n", - " Downloading yapf-0.33.0-py2.py3-none-any.whl (200 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m200.9/200.9 kB\u001b[0m \u001b[31m21.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.10/dist-packages (from mmengine) (4.7.0.72)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (1.0.7)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (0.11.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (4.39.3)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (1.4.4)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (23.1)\n", - "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (8.4.0)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (3.0.9)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (2.8.2)\n", - "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.10/dist-packages 
(from rich->mmengine) (2.2.0)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->mmengine) (2.14.0)\n", - "Requirement already satisfied: tomli>=2.0.1 in /usr/local/lib/python3.10/dist-packages (from yapf->mmengine) (2.0.1)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine) (0.1.2)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine) (1.16.0)\n", - "Installing collected packages: addict, yapf, mmengine\n", - "Successfully installed addict-2.4.0 mmengine-0.7.3 yapf-0.33.0\n", - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", - "Collecting mmcv>=2.0.0\n", - " Downloading https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/mmcv-2.0.0-cp310-cp310-manylinux1_x86_64.whl (74.4 MB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m74.4/74.4 MB\u001b[0m \u001b[31m9.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: addict in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (2.4.0)\n", - "Requirement already satisfied: mmengine>=0.2.0 in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (0.7.3)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (1.22.4)\n", - "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (23.1)\n", - "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (8.4.0)\n", - "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (6.0)\n", - "Requirement already satisfied: yapf in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (0.33.0)\n", - "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (4.7.0.72)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0) (3.7.1)\n", - "Requirement already satisfied: rich in /usr/local/lib/python3.10/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0) (13.3.4)\n", - "Requirement already satisfied: termcolor in /usr/local/lib/python3.10/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0) (2.3.0)\n", - "Requirement already satisfied: tomli>=2.0.1 in /usr/local/lib/python3.10/dist-packages (from yapf->mmcv>=2.0.0) (2.0.1)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (1.0.7)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (0.11.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (4.39.3)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (1.4.4)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from 
matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (3.0.9)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (2.8.2)\n", - "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0) (2.2.0)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0) (2.14.0)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine>=0.2.0->mmcv>=2.0.0) (0.1.2)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (1.16.0)\n", - "Installing collected packages: mmcv\n", - "Successfully installed mmcv-2.0.0\n", - "Cloning into 'mmaction2'...\n", - "remote: Enumerating objects: 21284, done.\u001b[K\n", - "remote: Counting objects: 100% (394/394), done.\u001b[K\n", - "remote: Compressing objects: 100% (287/287), done.\u001b[K\n", - "remote: Total 21284 (delta 175), reused 248 (delta 103), pack-reused 20890\u001b[K\n", - "Receiving objects: 100% (21284/21284), 68.63 MiB | 16.59 MiB/s, done.\n", - "Resolving deltas: 100% (14990/14990), done.\n", - "/content/mmaction2\n", - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Obtaining file:///content/mmaction2\n", - " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Collecting decord>=0.4.1 (from mmaction2==1.0.0)\n", - " Downloading decord-0.6.0-py3-none-manylinux2010_x86_64.whl (13.6 MB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m13.6/13.6 MB\u001b[0m \u001b[31m76.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting einops (from mmaction2==1.0.0)\n", - " Downloading einops-0.6.1-py3-none-any.whl (42 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m42.2/42.2 kB\u001b[0m \u001b[31m4.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (3.7.1)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (1.22.4)\n", - "Requirement already satisfied: opencv-contrib-python in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (4.7.0.72)\n", - "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (8.4.0)\n", - "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (1.10.1)\n", - "Requirement already satisfied: torch>=1.3 in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (2.0.0+cu118)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (3.12.0)\n", - "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (4.5.0)\n", - "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) 
(1.11.1)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (3.1)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (3.1.2)\n", - "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (2.0.0)\n", - "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.3->mmaction2==1.0.0) (3.25.2)\n", - "Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.3->mmaction2==1.0.0) (16.0.3)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (1.0.7)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (0.11.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (4.39.3)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (1.4.4)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (23.1)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (3.0.9)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (2.8.2)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->mmaction2==1.0.0) (1.16.0)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.3->mmaction2==1.0.0) (2.1.2)\n", - "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.3->mmaction2==1.0.0) (1.3.0)\n", - "Installing collected packages: einops, decord, mmaction2\n", - " Running setup.py develop for mmaction2\n", - "Successfully installed decord-0.6.0 einops-0.6.1 mmaction2-1.0.0\n", - "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Collecting av>=9.0 (from -r requirements/optional.txt (line 1))\n", - " Downloading av-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (31.0 MB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m31.0/31.0 MB\u001b[0m \u001b[31m38.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: future in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 2)) (0.18.3)\n", - "Collecting fvcore (from -r requirements/optional.txt (line 3))\n", - " Downloading fvcore-0.1.5.post20221221.tar.gz (50 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m50.2/50.2 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: imgaug in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 4)) (0.4.0)\n", - "Requirement already satisfied: librosa in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 5)) (0.10.0.post2)\n", - "Collecting lmdb (from -r requirements/optional.txt (line 6))\n", - " Downloading lmdb-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (299 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m299.2/299.2 kB\u001b[0m \u001b[31m30.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: moviepy in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 7)) (1.0.3)\n", - "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 8)) (23.1)\n", - "Collecting pims (from -r requirements/optional.txt (line 9))\n", - " Downloading PIMS-0.6.1.tar.gz (86 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m86.0/86.0 kB\u001b[0m \u001b[31m12.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Collecting PyTurboJPEG (from -r requirements/optional.txt (line 10))\n", - " Downloading PyTurboJPEG-1.7.1.tar.gz (11 kB)\n", - " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: soundfile in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 11)) (0.12.1)\n", - "Requirement already satisfied: tensorboard in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 12)) (2.12.2)\n", - "Collecting wandb (from -r requirements/optional.txt (line 13))\n", - " Downloading wandb-0.15.2-py3-none-any.whl (2.0 MB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m79.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (1.22.4)\n", - "Collecting yacs>=0.1.6 (from fvcore->-r requirements/optional.txt (line 3))\n", - " Downloading yacs-0.1.8-py3-none-any.whl (14 kB)\n", - "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (6.0)\n", - "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (4.65.0)\n", - "Requirement already satisfied: termcolor>=1.1 in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (2.3.0)\n", - "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (8.4.0)\n", - "Requirement already satisfied: tabulate in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (0.8.10)\n", - "Collecting iopath>=0.1.7 (from fvcore->-r requirements/optional.txt (line 3))\n", - " Downloading iopath-0.1.10.tar.gz (42 kB)\n", - "\u001b[2K 
\u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m42.2/42.2 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (1.16.0)\n", - "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (1.10.1)\n", - "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (3.7.1)\n", - "Requirement already satisfied: scikit-image>=0.14.2 in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (0.19.3)\n", - "Requirement already satisfied: opencv-python in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (4.7.0.72)\n", - "Requirement already satisfied: imageio in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (2.25.1)\n", - "Requirement already satisfied: Shapely in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (2.0.1)\n", - "Requirement already satisfied: audioread>=2.1.9 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (3.0.0)\n", - "Requirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (1.2.2)\n", - "Requirement already satisfied: joblib>=0.14 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (1.2.0)\n", - "Requirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (4.4.2)\n", - "Requirement already satisfied: numba>=0.51.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (0.56.4)\n", - "Requirement already satisfied: pooch<1.7,>=1.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (1.6.0)\n", - "Requirement already satisfied: soxr>=0.3.2 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (0.3.5)\n", - "Requirement already satisfied: typing-extensions>=4.1.1 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (4.5.0)\n", - "Requirement already satisfied: lazy-loader>=0.1 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (0.2)\n", - "Requirement already satisfied: msgpack>=1.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (1.0.5)\n", - "Requirement already satisfied: requests<3.0,>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from moviepy->-r requirements/optional.txt (line 7)) (2.27.1)\n", - "Requirement already satisfied: proglog<=1.0.0 in /usr/local/lib/python3.10/dist-packages (from moviepy->-r requirements/optional.txt (line 7)) (0.1.10)\n", - "Requirement already satisfied: imageio-ffmpeg>=0.2.0 in /usr/local/lib/python3.10/dist-packages (from moviepy->-r requirements/optional.txt (line 7)) (0.4.8)\n", - "Collecting slicerator>=0.9.8 (from pims->-r requirements/optional.txt (line 
9))\n", - " Downloading slicerator-1.1.0-py3-none-any.whl (10 kB)\n", - "Requirement already satisfied: cffi>=1.0 in /usr/local/lib/python3.10/dist-packages (from soundfile->-r requirements/optional.txt (line 11)) (1.15.1)\n", - "Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (1.4.0)\n", - "Requirement already satisfied: grpcio>=1.48.2 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (1.54.0)\n", - "Requirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (2.17.3)\n", - "Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (1.0.0)\n", - "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (3.4.3)\n", - "Requirement already satisfied: protobuf>=3.19.6 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (3.20.3)\n", - "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (67.7.2)\n", - "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (0.7.0)\n", - "Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (1.8.1)\n", - "Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (2.3.0)\n", - "Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (0.40.0)\n", - "Requirement already satisfied: Click!=8.0.0,>=7.0 in /usr/local/lib/python3.10/dist-packages (from wandb->-r requirements/optional.txt (line 13)) (8.1.3)\n", - "Collecting GitPython!=3.1.29,>=1.0.0 (from wandb->-r requirements/optional.txt (line 13))\n", - " Downloading GitPython-3.1.31-py3-none-any.whl (184 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m184.3/184.3 kB\u001b[0m \u001b[31m22.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: psutil>=5.0.0 in /usr/local/lib/python3.10/dist-packages (from wandb->-r requirements/optional.txt (line 13)) (5.9.5)\n", - "Collecting sentry-sdk>=1.0.0 (from wandb->-r requirements/optional.txt (line 13))\n", - " Downloading sentry_sdk-1.22.2-py2.py3-none-any.whl (203 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m203.3/203.3 kB\u001b[0m \u001b[31m25.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting docker-pycreds>=0.4.0 (from wandb->-r requirements/optional.txt (line 13))\n", - " Downloading docker_pycreds-0.4.0-py2.py3-none-any.whl (9.0 kB)\n", - "Collecting pathtools (from wandb->-r requirements/optional.txt (line 13))\n", - " Downloading pathtools-0.1.2.tar.gz (11 kB)\n", - " Preparing metadata 
(setup.py) ... \u001b[?25l\u001b[?25hdone\n", - "Collecting setproctitle (from wandb->-r requirements/optional.txt (line 13))\n", - " Downloading setproctitle-1.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (30 kB)\n", - "Requirement already satisfied: appdirs>=1.4.3 in /usr/local/lib/python3.10/dist-packages (from wandb->-r requirements/optional.txt (line 13)) (1.4.4)\n", - "Requirement already satisfied: pycparser in /usr/local/lib/python3.10/dist-packages (from cffi>=1.0->soundfile->-r requirements/optional.txt (line 11)) (2.21)\n", - "Collecting gitdb<5,>=4.0.1 (from GitPython!=3.1.29,>=1.0.0->wandb->-r requirements/optional.txt (line 13))\n", - " Downloading gitdb-4.0.10-py3-none-any.whl (62 kB)\n", - "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m62.7/62.7 kB\u001b[0m \u001b[31m9.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hRequirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard->-r requirements/optional.txt (line 12)) (5.3.0)\n", - "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard->-r requirements/optional.txt (line 12)) (0.3.0)\n", - "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard->-r requirements/optional.txt (line 12)) (4.9)\n", - "Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard->-r requirements/optional.txt (line 12)) (1.3.1)\n", - "Collecting portalocker (from iopath>=0.1.7->fvcore->-r requirements/optional.txt (line 3))\n", - " Downloading portalocker-2.7.0-py2.py3-none-any.whl (15 kB)\n", - "Requirement already satisfied: llvmlite<0.40,>=0.39.0dev0 in /usr/local/lib/python3.10/dist-packages (from numba>=0.51.0->librosa->-r requirements/optional.txt (line 5)) (0.39.1)\n", - "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3.0,>=2.8.1->moviepy->-r requirements/optional.txt (line 7)) (1.26.15)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3.0,>=2.8.1->moviepy->-r requirements/optional.txt (line 7)) (2022.12.7)\n", - "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests<3.0,>=2.8.1->moviepy->-r requirements/optional.txt (line 7)) (2.0.12)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3.0,>=2.8.1->moviepy->-r requirements/optional.txt (line 7)) (3.4)\n", - "Requirement already satisfied: networkx>=2.2 in /usr/local/lib/python3.10/dist-packages (from scikit-image>=0.14.2->imgaug->-r requirements/optional.txt (line 4)) (3.1)\n", - "Requirement already satisfied: tifffile>=2019.7.26 in /usr/local/lib/python3.10/dist-packages (from scikit-image>=0.14.2->imgaug->-r requirements/optional.txt (line 4)) (2023.4.12)\n", - "Requirement already satisfied: PyWavelets>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-image>=0.14.2->imgaug->-r requirements/optional.txt (line 4)) (1.4.1)\n", - "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from 
scikit-learn>=0.20.0->librosa->-r requirements/optional.txt (line 5)) (3.1.0)\n", - "Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tensorboard->-r requirements/optional.txt (line 12)) (2.1.2)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (1.0.7)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (0.11.0)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (4.39.3)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (1.4.4)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (3.0.9)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (2.8.2)\n", - "Collecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->GitPython!=3.1.29,>=1.0.0->wandb->-r requirements/optional.txt (line 13))\n", - " Downloading smmap-5.0.0-py3-none-any.whl (24 kB)\n", - "Requirement already satisfied: pyasn1<0.6.0,>=0.4.6 in /usr/local/lib/python3.10/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard->-r requirements/optional.txt (line 12)) (0.5.0)\n", - "Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard->-r requirements/optional.txt (line 12)) (3.2.2)\n", - "Building wheels for collected packages: fvcore, pims, PyTurboJPEG, iopath, pathtools\n", - " Building wheel for fvcore (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for fvcore: filename=fvcore-0.1.5.post20221221-py3-none-any.whl size=61405 sha256=25c1e50155c8788d00eec898793c96133a746a8bb076ffc5c01f5a4dc256751e\n", - " Stored in directory: /root/.cache/pip/wheels/01/c0/af/77c1cf53a1be9e42a52b48e5af2169d40ec2e89f7362489dd0\n", - " Building wheel for pims (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for pims: filename=PIMS-0.6.1-py3-none-any.whl size=82619 sha256=59a328dc88a438c60cfb6e937e04c8a7dd55ad2a2905034cd41ff80cdbba6497\n", - " Stored in directory: /root/.cache/pip/wheels/cc/bf/3e/bfa77232d942f8244145f9c713b6b38f6ef04b6fb5c021c114\n", - " Building wheel for PyTurboJPEG (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for PyTurboJPEG: filename=PyTurboJPEG-1.7.1-py3-none-any.whl size=12243 sha256=ddf6424c85ac533335abd96dd9e98b014ea1dd4f143c88cd35ecb08d6128f411\n", - " Stored in directory: /root/.cache/pip/wheels/de/6e/b1/e7ba70c328c3395555cb92ca8820babb32950d867858b1948b\n", - " Building wheel for iopath (setup.py) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for iopath: filename=iopath-0.1.10-py3-none-any.whl size=31531 sha256=db977a4344bebbdd710665e767caab4fbcf53cc6aea0707cd38d26c45718331e\n", - " Stored in directory: /root/.cache/pip/wheels/9a/a3/b6/ac0fcd1b4ed5cfeb3db92e6a0e476cfd48ed0df92b91080c1d\n", - " Building wheel for pathtools (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", - " Created wheel for pathtools: filename=pathtools-0.1.2-py3-none-any.whl size=8791 sha256=08bb5753ce029aef01f25c3e81882d93c0e040e5932e90a02a062ad058756b52\n", - " Stored in directory: /root/.cache/pip/wheels/e7/f3/22/152153d6eb222ee7a56ff8617d80ee5207207a8c00a7aab794\n", - "Successfully built fvcore pims PyTurboJPEG iopath pathtools\n", - "Installing collected packages: slicerator, pathtools, lmdb, av, yacs, smmap, setproctitle, sentry-sdk, PyTurboJPEG, portalocker, docker-pycreds, pims, iopath, gitdb, GitPython, fvcore, wandb\n", - "Successfully installed GitPython-3.1.31 PyTurboJPEG-1.7.1 av-10.0.0 docker-pycreds-0.4.0 fvcore-0.1.5.post20221221 gitdb-4.0.10 iopath-0.1.10 lmdb-1.4.1 pathtools-0.1.2 pims-0.6.1 portalocker-2.7.0 sentry-sdk-1.22.2 setproctitle-1.3.2 slicerator-1.1.0 smmap-5.0.0 wandb-0.15.2 yacs-0.1.8\n" - ] - } - ], - "source": [ - "# install MMEngine, MMCV and MMDetection using MIM\n", - "%pip install -U openmim\n", - "!mim install mmengine\n", - "!mim install \"mmcv>=2.0.0\"\n", - "\n", - "# Install mmaction2\n", - "!rm -rf mmaction2\n", - "!git clone https://github.com/open-mmlab/mmaction2.git -b main\n", - "%cd mmaction2\n", - "\n", - "!pip install -e .\n", - "\n", - "# Install some optional requirements\n", - "!pip install -r requirements/optional.txt" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "No_zZAFpWC-a", - "outputId": "9386dd81-2308-4adb-d3cb-798de11c035e" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "2.0.0+cu118 True\n", - "1.0.0\n", - "11.8\n", - "GCC 9.3\n", - "OrderedDict([('sys.platform', 'linux'), ('Python', '3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]'), ('CUDA available', True), ('numpy_random_seed', 2147483648), ('GPU 0', 'Tesla T4'), ('CUDA_HOME', '/usr/local/cuda'), ('NVCC', 'Cuda compilation tools, release 11.8, V11.8.89'), ('GCC', 'x86_64-linux-gnu-gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0'), ('PyTorch', '2.0.0+cu118'), ('PyTorch compiling details', 'PyTorch built with:\\n - GCC 9.3\\n - C++ Version: 201703\\n - Intel(R) oneAPI Math Kernel Library Version 2022.2-Product Build 20220804 for Intel(R) 64 architecture applications\\n - Intel(R) MKL-DNN v2.7.3 (Git Hash 6dbeffbae1f23cbbeae17adb7b5b13f1f37c080e)\\n - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\\n - LAPACK is enabled (usually provided by MKL)\\n - NNPACK is enabled\\n - CPU capability usage: AVX2\\n - CUDA Runtime 11.8\\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90\\n - CuDNN 8.7\\n - Magma 2.6.1\\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.8, CUDNN_VERSION=8.7.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -D_GLIBCXX_USE_CXX11_ABI=0 -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_DISABLE_GPU_ASSERTS=ON, TORCH_VERSION=2.0.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=1, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \\n'), ('TorchVision', '0.15.1+cu118'), ('OpenCV', '4.7.0'), ('MMEngine', '0.7.3')])\n" - ] - } - ], - "source": [ - "# Check Pytorch installation\n", - "import torch, torchvision\n", - "print(torch.__version__, torch.cuda.is_available())\n", - "\n", - "# Check MMAction2 installation\n", - "import mmaction\n", - "print(mmaction.__version__)\n", - "\n", - "# Check MMCV installation\n", - "from mmcv.ops import get_compiling_cuda_version, get_compiler_version\n", - "print(get_compiling_cuda_version())\n", - "print(get_compiler_version())\n", - "\n", - "# Check MMEngine installation\n", - "from mmengine.utils.dl_utils import collect_env\n", - "print(collect_env())" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pXf7oV5DWdab" - }, - "source": [ - "## Perform inference with a MMAction2 recognizer\n", - "MMAction2 already provides high level APIs to do inference and training." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "64CW6d_AaT-Q", - "outputId": "ea330d8c-2e20-4dbd-d046-51d7c9ec4f7a" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "--2023-05-15 03:33:08-- https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth\n", - "Resolving download.openmmlab.com (download.openmmlab.com)... 163.181.82.216, 163.181.82.218, 163.181.82.213, ...\n", - "Connecting to download.openmmlab.com (download.openmmlab.com)|163.181.82.216|:443... connected.\n", - "HTTP request sent, awaiting response... 
200 OK\n", - "Length: 97579339 (93M) [application/octet-stream]\n", - "Saving to: โ€˜checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pthโ€™\n", - "\n", - "checkpoints/tsn_r50 100%[===================>] 93.06M 26.1MB/s in 3.6s \n", - "\n", - "2023-05-15 03:33:12 (26.2 MB/s) - โ€˜checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pthโ€™ saved [97579339/97579339]\n", - "\n" - ] - } - ], - "source": [ - "!mkdir checkpoints\n", - "!wget -c https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \\\n", - " -O checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "VcjSRFELVbNk" + }, + "source": [ + "# MMAction2 Tutorial\n", + "\n", + "Welcome to MMAction2! This is the official colab tutorial for using MMAction2. In this tutorial, you will learn\n", + "- Perform inference with a MMAction2 recognizer.\n", + "- Train a new recognizer with a new dataset.\n", + "\n", + "\n", + "Let's start!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7LqHGkGEVqpm" + }, + "source": [ + "## Install MMAction2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "Bf8PpPXtVvmg", + "outputId": "9d3f4594-f151-4ee9-a19b-09f8a439ac04" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "HNZB7NoSabzj", - "outputId": "c0c2ba71-72ff-4cac-a5b8-65590f5a6bb0" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Loads checkpoint by local backend from path: checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth\n" - ] - } - ], - "source": [ - "from mmaction.apis import inference_recognizer, init_recognizer\n", - "from mmengine import Config\n", - "\n", - "\n", - "# Choose to use a config and initialize the recognizer\n", - "config = 'configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py'\n", - "config = Config.fromfile(config)\n", - "# Setup a checkpoint file to load\n", - "checkpoint = 'checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth'\n", - "# Initialize the recognizer\n", - "model = init_recognizer(config, checkpoint, device='cuda:0')" - ] - }, + "output_type": "stream", + "name": "stdout", + "text": [ + "nvcc: NVIDIA (R) Cuda compiler driver\n", + "Copyright (c) 2005-2022 NVIDIA Corporation\n", + "Built on Wed_Sep_21_10:33:58_PDT_2022\n", + "Cuda compilation tools, release 11.8, V11.8.89\n", + "Build cuda_11.8.r11.8/compiler.31833905_0\n", + "gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\n", + "Copyright (C) 2019 Free Software Foundation, Inc.\n", + "This is free software; see the source for copying conditions. 
There is NO\n", + "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n", + "\n" + ] + } + ], + "source": [ + "# Check nvcc version\n", + "!nvcc -V\n", + "# Check GCC version\n", + "!gcc --version" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "ZPwKGzqydnb2", + "outputId": "27506fa7-48a2-4fe0-d377-56f940dafec4", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "id": "rEMsBnpHapAn", - "outputId": "ec05049e-7289-4798-94fa-2b773cb23634", - "colab": { - "base_uri": "https://localhost:8080/" - } - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "05/15 03:33:18 - mmengine - WARNING - \"FileClient\" will be deprecated in future. Please use io functions in https://mmengine.readthedocs.io/en/latest/api/fileio.html#file-io\n", - "05/15 03:33:18 - mmengine - WARNING - \"HardDiskBackend\" is the alias of \"LocalBackend\" and the former will be deprecated in future.\n" - ] - } - ], - "source": [ - "# Use the recognizer to do inference\n", - "from operator import itemgetter\n", - "video = 'demo/demo.mp4'\n", - "label = 'tools/data/kinetics/label_map_k400.txt'\n", - "results = inference_recognizer(model, video)\n", - "\n", - "pred_scores = results.pred_scores.item.tolist()\n", - "score_tuples = tuple(zip(range(len(pred_scores)), pred_scores))\n", - "score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True)\n", - "top5_label = score_sorted[:5]\n", - "\n", - "labels = open(label).readlines()\n", - "labels = [x.strip() for x in labels]\n", - "results = [(labels[k[0]], k[1]) for k in top5_label]\n" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "Looking in indexes: https://download.pytorch.org/whl/cu118, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (2.0.0+cu118)\n", + "Requirement already satisfied: torchvision in /usr/local/lib/python3.10/dist-packages (0.15.1+cu118)\n", + "Requirement already satisfied: torchaudio in /usr/local/lib/python3.10/dist-packages (2.0.1+cu118)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch) (3.12.0)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch) (4.5.0)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch) (1.11.1)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch) (3.1)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch) (3.1.2)\n", + "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch) (2.0.0)\n", + "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch) (3.25.2)\n", + "Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch) (16.0.3)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from torchvision) (1.22.4)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from torchvision) (2.27.1)\n", + "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.10/dist-packages (from torchvision) (8.4.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in 
/usr/local/lib/python3.10/dist-packages (from jinja2->torch) (2.1.2)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (1.26.15)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (2022.12.7)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (2.0.12)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (3.4)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch) (1.3.0)\n" + ] + } + ], + "source": [ + "# install dependencies: (if your colab has CUDA 11.8)\n", + "%pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "5PAJ4ArzV5Ry", + "outputId": "eb8539a0-9524-4c48-f3e1-0b013ce0d344" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "NIyJXqfWathq", - "outputId": "cb25aca9-e72d-4c54-f295-4c889713cb3a" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "The top-5 labels with corresponding scores are:\n", - "arm wrestling: 1.0\n", - "rock scissors paper: 6.434453414527752e-09\n", - "shaking hands: 2.7599860175087088e-09\n", - "clapping: 1.3454612979302283e-09\n", - "massaging feet: 5.555100823784187e-10\n" - ] - } - ], - "source": [ - "print('The top-5 labels with corresponding scores are:')\n", - "for result in results:\n", - " print(f'{result[0]}: ', result[1])" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Collecting openmim\n", + " Downloading openmim-0.3.7-py2.py3-none-any.whl (51 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m51.3/51.3 kB\u001B[0m \u001B[31m4.0 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hRequirement already satisfied: Click in /usr/local/lib/python3.10/dist-packages (from openmim) (8.1.3)\n", + "Collecting colorama (from openmim)\n", + " Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n", + "Collecting model-index (from openmim)\n", + " Downloading model_index-0.1.11-py3-none-any.whl (34 kB)\n", + "Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from openmim) (1.5.3)\n", + "Requirement already satisfied: pip>=19.3 in /usr/local/lib/python3.10/dist-packages (from openmim) (23.1.2)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from openmim) (2.27.1)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.10/dist-packages (from openmim) (13.3.4)\n", + "Requirement already satisfied: tabulate in /usr/local/lib/python3.10/dist-packages (from openmim) (0.8.10)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from model-index->openmim) (6.0)\n", + "Requirement already satisfied: markdown in /usr/local/lib/python3.10/dist-packages (from model-index->openmim) (3.4.3)\n", + "Collecting ordered-set (from 
model-index->openmim)\n", + " Downloading ordered_set-4.1.0-py3-none-any.whl (7.6 kB)\n", + "Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from pandas->openmim) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->openmim) (2022.7.1)\n", + "Requirement already satisfied: numpy>=1.21.0 in /usr/local/lib/python3.10/dist-packages (from pandas->openmim) (1.22.4)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->openmim) (1.26.15)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->openmim) (2022.12.7)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests->openmim) (2.0.12)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->openmim) (3.4)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich->openmim) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->openmim) (2.14.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->openmim) (0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.1->pandas->openmim) (1.16.0)\n", + "Installing collected packages: ordered-set, colorama, model-index, openmim\n", + "Successfully installed colorama-0.4.6 model-index-0.1.11 openmim-0.3.7 ordered-set-4.1.0\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", + "Collecting mmengine\n", + " Downloading mmengine-0.7.3-py3-none-any.whl (372 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m372.1/372.1 kB\u001B[0m \u001B[31m20.5 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hCollecting addict (from mmengine)\n", + " Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from mmengine) (3.7.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from mmengine) (1.22.4)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from mmengine) (6.0)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.10/dist-packages (from mmengine) (13.3.4)\n", + "Requirement already satisfied: termcolor in /usr/local/lib/python3.10/dist-packages (from mmengine) (2.3.0)\n", + "Collecting yapf (from mmengine)\n", + " Downloading yapf-0.33.0-py2.py3-none-any.whl (200 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m200.9/200.9 kB\u001B[0m \u001B[31m21.0 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hRequirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.10/dist-packages (from mmengine) (4.7.0.72)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages 
(from matplotlib->mmengine) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (0.11.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (4.39.3)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (1.4.4)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (23.1)\n", + "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (8.4.0)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (3.0.9)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine) (2.8.2)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich->mmengine) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->mmengine) (2.14.0)\n", + "Requirement already satisfied: tomli>=2.0.1 in /usr/local/lib/python3.10/dist-packages (from yapf->mmengine) (2.0.1)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine) (0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine) (1.16.0)\n", + "Installing collected packages: addict, yapf, mmengine\n", + "Successfully installed addict-2.4.0 mmengine-0.7.3 yapf-0.33.0\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Looking in links: https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/index.html\n", + "Collecting mmcv>=2.0.0\n", + " Downloading https://download.openmmlab.com/mmcv/dist/cu118/torch2.0.0/mmcv-2.0.0-cp310-cp310-manylinux1_x86_64.whl (74.4 MB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m74.4/74.4 MB\u001B[0m \u001B[31m9.7 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hRequirement already satisfied: addict in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (2.4.0)\n", + "Requirement already satisfied: mmengine>=0.2.0 in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (0.7.3)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (1.22.4)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (23.1)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (8.4.0)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (6.0)\n", + "Requirement already satisfied: yapf in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (0.33.0)\n", + "Requirement already satisfied: opencv-python>=3 in /usr/local/lib/python3.10/dist-packages (from mmcv>=2.0.0) (4.7.0.72)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0) (3.7.1)\n", + "Requirement already satisfied: rich in 
/usr/local/lib/python3.10/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0) (13.3.4)\n", + "Requirement already satisfied: termcolor in /usr/local/lib/python3.10/dist-packages (from mmengine>=0.2.0->mmcv>=2.0.0) (2.3.0)\n", + "Requirement already satisfied: tomli>=2.0.1 in /usr/local/lib/python3.10/dist-packages (from yapf->mmcv>=2.0.0) (2.0.1)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (0.11.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (4.39.3)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (1.4.4)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (3.0.9)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (2.8.2)\n", + "Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->mmengine>=0.2.0->mmcv>=2.0.0) (2.14.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py<3.0.0,>=2.2.0->rich->mmengine>=0.2.0->mmcv>=2.0.0) (0.1.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->mmengine>=0.2.0->mmcv>=2.0.0) (1.16.0)\n", + "Installing collected packages: mmcv\n", + "Successfully installed mmcv-2.0.0\n", + "Cloning into 'mmaction2'...\n", + "remote: Enumerating objects: 21284, done.\u001B[K\n", + "remote: Counting objects: 100% (394/394), done.\u001B[K\n", + "remote: Compressing objects: 100% (287/287), done.\u001B[K\n", + "remote: Total 21284 (delta 175), reused 248 (delta 103), pack-reused 20890\u001B[K\n", + "Receiving objects: 100% (21284/21284), 68.63 MiB | 16.59 MiB/s, done.\n", + "Resolving deltas: 100% (14990/14990), done.\n", + "/content/mmaction2\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Obtaining file:///content/mmaction2\n", + " Preparing metadata (setup.py) ... 
\u001B[?25l\u001B[?25hdone\n", + "Collecting decord>=0.4.1 (from mmaction2==1.0.0)\n", + " Downloading decord-0.6.0-py3-none-manylinux2010_x86_64.whl (13.6 MB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m13.6/13.6 MB\u001B[0m \u001B[31m76.9 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hCollecting einops (from mmaction2==1.0.0)\n", + " Downloading einops-0.6.1-py3-none-any.whl (42 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m42.2/42.2 kB\u001B[0m \u001B[31m4.5 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (3.7.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (1.22.4)\n", + "Requirement already satisfied: opencv-contrib-python in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (4.7.0.72)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (8.4.0)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (1.10.1)\n", + "Requirement already satisfied: torch>=1.3 in /usr/local/lib/python3.10/dist-packages (from mmaction2==1.0.0) (2.0.0+cu118)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (3.12.0)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (4.5.0)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (1.11.1)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (3.1)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (3.1.2)\n", + "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.3->mmaction2==1.0.0) (2.0.0)\n", + "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.3->mmaction2==1.0.0) (3.25.2)\n", + "Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.3->mmaction2==1.0.0) (16.0.3)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (0.11.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (4.39.3)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (1.4.4)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (23.1)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (3.0.9)\n", + "Requirement already satisfied: python-dateutil>=2.7 in 
/usr/local/lib/python3.10/dist-packages (from matplotlib->mmaction2==1.0.0) (2.8.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->mmaction2==1.0.0) (1.16.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.3->mmaction2==1.0.0) (2.1.2)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.3->mmaction2==1.0.0) (1.3.0)\n", + "Installing collected packages: einops, decord, mmaction2\n", + " Running setup.py develop for mmaction2\n", + "Successfully installed decord-0.6.0 einops-0.6.1 mmaction2-1.0.0\n", + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Collecting av>=9.0 (from -r requirements/optional.txt (line 1))\n", + " Downloading av-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (31.0 MB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m31.0/31.0 MB\u001B[0m \u001B[31m38.3 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hRequirement already satisfied: future in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 2)) (0.18.3)\n", + "Collecting fvcore (from -r requirements/optional.txt (line 3))\n", + " Downloading fvcore-0.1.5.post20221221.tar.gz (50 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m50.2/50.2 kB\u001B[0m \u001B[31m6.7 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25h Preparing metadata (setup.py) ... \u001B[?25l\u001B[?25hdone\n", + "Requirement already satisfied: imgaug in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 4)) (0.4.0)\n", + "Requirement already satisfied: librosa in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 5)) (0.10.0.post2)\n", + "Collecting lmdb (from -r requirements/optional.txt (line 6))\n", + " Downloading lmdb-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (299 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m299.2/299.2 kB\u001B[0m \u001B[31m30.2 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hRequirement already satisfied: moviepy in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 7)) (1.0.3)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 8)) (23.1)\n", + "Collecting pims (from -r requirements/optional.txt (line 9))\n", + " Downloading PIMS-0.6.1.tar.gz (86 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m86.0/86.0 kB\u001B[0m \u001B[31m12.2 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25h Preparing metadata (setup.py) ... \u001B[?25l\u001B[?25hdone\n", + "Collecting PyTurboJPEG (from -r requirements/optional.txt (line 10))\n", + " Downloading PyTurboJPEG-1.7.1.tar.gz (11 kB)\n", + " Preparing metadata (setup.py) ... 
\u001B[?25l\u001B[?25hdone\n", + "Requirement already satisfied: soundfile in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 11)) (0.12.1)\n", + "Requirement already satisfied: tensorboard in /usr/local/lib/python3.10/dist-packages (from -r requirements/optional.txt (line 12)) (2.12.2)\n", + "Collecting wandb (from -r requirements/optional.txt (line 13))\n", + " Downloading wandb-0.15.2-py3-none-any.whl (2.0 MB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m2.0/2.0 MB\u001B[0m \u001B[31m79.1 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (1.22.4)\n", + "Collecting yacs>=0.1.6 (from fvcore->-r requirements/optional.txt (line 3))\n", + " Downloading yacs-0.1.8-py3-none-any.whl (14 kB)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (6.0)\n", + "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (4.65.0)\n", + "Requirement already satisfied: termcolor>=1.1 in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (2.3.0)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (8.4.0)\n", + "Requirement already satisfied: tabulate in /usr/local/lib/python3.10/dist-packages (from fvcore->-r requirements/optional.txt (line 3)) (0.8.10)\n", + "Collecting iopath>=0.1.7 (from fvcore->-r requirements/optional.txt (line 3))\n", + " Downloading iopath-0.1.10.tar.gz (42 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m42.2/42.2 kB\u001B[0m \u001B[31m4.8 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25h Preparing metadata (setup.py) ... 
\u001B[?25l\u001B[?25hdone\n", + "Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (1.16.0)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (1.10.1)\n", + "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (3.7.1)\n", + "Requirement already satisfied: scikit-image>=0.14.2 in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (0.19.3)\n", + "Requirement already satisfied: opencv-python in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (4.7.0.72)\n", + "Requirement already satisfied: imageio in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (2.25.1)\n", + "Requirement already satisfied: Shapely in /usr/local/lib/python3.10/dist-packages (from imgaug->-r requirements/optional.txt (line 4)) (2.0.1)\n", + "Requirement already satisfied: audioread>=2.1.9 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (3.0.0)\n", + "Requirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (1.2.2)\n", + "Requirement already satisfied: joblib>=0.14 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (1.2.0)\n", + "Requirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (4.4.2)\n", + "Requirement already satisfied: numba>=0.51.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (0.56.4)\n", + "Requirement already satisfied: pooch<1.7,>=1.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (1.6.0)\n", + "Requirement already satisfied: soxr>=0.3.2 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (0.3.5)\n", + "Requirement already satisfied: typing-extensions>=4.1.1 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (4.5.0)\n", + "Requirement already satisfied: lazy-loader>=0.1 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (0.2)\n", + "Requirement already satisfied: msgpack>=1.0 in /usr/local/lib/python3.10/dist-packages (from librosa->-r requirements/optional.txt (line 5)) (1.0.5)\n", + "Requirement already satisfied: requests<3.0,>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from moviepy->-r requirements/optional.txt (line 7)) (2.27.1)\n", + "Requirement already satisfied: proglog<=1.0.0 in /usr/local/lib/python3.10/dist-packages (from moviepy->-r requirements/optional.txt (line 7)) (0.1.10)\n", + "Requirement already satisfied: imageio-ffmpeg>=0.2.0 in /usr/local/lib/python3.10/dist-packages (from moviepy->-r requirements/optional.txt (line 7)) (0.4.8)\n", + "Collecting slicerator>=0.9.8 (from pims->-r requirements/optional.txt (line 9))\n", + " Downloading slicerator-1.1.0-py3-none-any.whl (10 kB)\n", + "Requirement already satisfied: cffi>=1.0 in /usr/local/lib/python3.10/dist-packages (from soundfile->-r requirements/optional.txt (line 11)) (1.15.1)\n", + "Requirement already satisfied: absl-py>=0.4 in 
/usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (1.4.0)\n", + "Requirement already satisfied: grpcio>=1.48.2 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (1.54.0)\n", + "Requirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (2.17.3)\n", + "Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (1.0.0)\n", + "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (3.4.3)\n", + "Requirement already satisfied: protobuf>=3.19.6 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (3.20.3)\n", + "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (67.7.2)\n", + "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (0.7.0)\n", + "Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (1.8.1)\n", + "Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (2.3.0)\n", + "Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements/optional.txt (line 12)) (0.40.0)\n", + "Requirement already satisfied: Click!=8.0.0,>=7.0 in /usr/local/lib/python3.10/dist-packages (from wandb->-r requirements/optional.txt (line 13)) (8.1.3)\n", + "Collecting GitPython!=3.1.29,>=1.0.0 (from wandb->-r requirements/optional.txt (line 13))\n", + " Downloading GitPython-3.1.31-py3-none-any.whl (184 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m184.3/184.3 kB\u001B[0m \u001B[31m22.9 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hRequirement already satisfied: psutil>=5.0.0 in /usr/local/lib/python3.10/dist-packages (from wandb->-r requirements/optional.txt (line 13)) (5.9.5)\n", + "Collecting sentry-sdk>=1.0.0 (from wandb->-r requirements/optional.txt (line 13))\n", + " Downloading sentry_sdk-1.22.2-py2.py3-none-any.whl (203 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m203.3/203.3 kB\u001B[0m \u001B[31m25.7 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hCollecting docker-pycreds>=0.4.0 (from wandb->-r requirements/optional.txt (line 13))\n", + " Downloading docker_pycreds-0.4.0-py2.py3-none-any.whl (9.0 kB)\n", + "Collecting pathtools (from wandb->-r requirements/optional.txt (line 13))\n", + " Downloading pathtools-0.1.2.tar.gz (11 kB)\n", + " Preparing metadata (setup.py) ... 
\u001B[?25l\u001B[?25hdone\n", + "Collecting setproctitle (from wandb->-r requirements/optional.txt (line 13))\n", + " Downloading setproctitle-1.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (30 kB)\n", + "Requirement already satisfied: appdirs>=1.4.3 in /usr/local/lib/python3.10/dist-packages (from wandb->-r requirements/optional.txt (line 13)) (1.4.4)\n", + "Requirement already satisfied: pycparser in /usr/local/lib/python3.10/dist-packages (from cffi>=1.0->soundfile->-r requirements/optional.txt (line 11)) (2.21)\n", + "Collecting gitdb<5,>=4.0.1 (from GitPython!=3.1.29,>=1.0.0->wandb->-r requirements/optional.txt (line 13))\n", + " Downloading gitdb-4.0.10-py3-none-any.whl (62 kB)\n", + "\u001B[2K \u001B[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001B[0m \u001B[32m62.7/62.7 kB\u001B[0m \u001B[31m9.0 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m\n", + "\u001B[?25hRequirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard->-r requirements/optional.txt (line 12)) (5.3.0)\n", + "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard->-r requirements/optional.txt (line 12)) (0.3.0)\n", + "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from google-auth<3,>=1.6.3->tensorboard->-r requirements/optional.txt (line 12)) (4.9)\n", + "Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard->-r requirements/optional.txt (line 12)) (1.3.1)\n", + "Collecting portalocker (from iopath>=0.1.7->fvcore->-r requirements/optional.txt (line 3))\n", + " Downloading portalocker-2.7.0-py2.py3-none-any.whl (15 kB)\n", + "Requirement already satisfied: llvmlite<0.40,>=0.39.0dev0 in /usr/local/lib/python3.10/dist-packages (from numba>=0.51.0->librosa->-r requirements/optional.txt (line 5)) (0.39.1)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3.0,>=2.8.1->moviepy->-r requirements/optional.txt (line 7)) (1.26.15)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3.0,>=2.8.1->moviepy->-r requirements/optional.txt (line 7)) (2022.12.7)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests<3.0,>=2.8.1->moviepy->-r requirements/optional.txt (line 7)) (2.0.12)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3.0,>=2.8.1->moviepy->-r requirements/optional.txt (line 7)) (3.4)\n", + "Requirement already satisfied: networkx>=2.2 in /usr/local/lib/python3.10/dist-packages (from scikit-image>=0.14.2->imgaug->-r requirements/optional.txt (line 4)) (3.1)\n", + "Requirement already satisfied: tifffile>=2019.7.26 in /usr/local/lib/python3.10/dist-packages (from scikit-image>=0.14.2->imgaug->-r requirements/optional.txt (line 4)) (2023.4.12)\n", + "Requirement already satisfied: PyWavelets>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-image>=0.14.2->imgaug->-r requirements/optional.txt (line 4)) (1.4.1)\n", + "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from 
scikit-learn>=0.20.0->librosa->-r requirements/optional.txt (line 5)) (3.1.0)\n", + "Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tensorboard->-r requirements/optional.txt (line 12)) (2.1.2)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (1.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (0.11.0)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (4.39.3)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (1.4.4)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (3.0.9)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->imgaug->-r requirements/optional.txt (line 4)) (2.8.2)\n", + "Collecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->GitPython!=3.1.29,>=1.0.0->wandb->-r requirements/optional.txt (line 13))\n", + " Downloading smmap-5.0.0-py3-none-any.whl (24 kB)\n", + "Requirement already satisfied: pyasn1<0.6.0,>=0.4.6 in /usr/local/lib/python3.10/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard->-r requirements/optional.txt (line 12)) (0.5.0)\n", + "Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard->-r requirements/optional.txt (line 12)) (3.2.2)\n", + "Building wheels for collected packages: fvcore, pims, PyTurboJPEG, iopath, pathtools\n", + " Building wheel for fvcore (setup.py) ... \u001B[?25l\u001B[?25hdone\n", + " Created wheel for fvcore: filename=fvcore-0.1.5.post20221221-py3-none-any.whl size=61405 sha256=25c1e50155c8788d00eec898793c96133a746a8bb076ffc5c01f5a4dc256751e\n", + " Stored in directory: /root/.cache/pip/wheels/01/c0/af/77c1cf53a1be9e42a52b48e5af2169d40ec2e89f7362489dd0\n", + " Building wheel for pims (setup.py) ... \u001B[?25l\u001B[?25hdone\n", + " Created wheel for pims: filename=PIMS-0.6.1-py3-none-any.whl size=82619 sha256=59a328dc88a438c60cfb6e937e04c8a7dd55ad2a2905034cd41ff80cdbba6497\n", + " Stored in directory: /root/.cache/pip/wheels/cc/bf/3e/bfa77232d942f8244145f9c713b6b38f6ef04b6fb5c021c114\n", + " Building wheel for PyTurboJPEG (setup.py) ... \u001B[?25l\u001B[?25hdone\n", + " Created wheel for PyTurboJPEG: filename=PyTurboJPEG-1.7.1-py3-none-any.whl size=12243 sha256=ddf6424c85ac533335abd96dd9e98b014ea1dd4f143c88cd35ecb08d6128f411\n", + " Stored in directory: /root/.cache/pip/wheels/de/6e/b1/e7ba70c328c3395555cb92ca8820babb32950d867858b1948b\n", + " Building wheel for iopath (setup.py) ... \u001B[?25l\u001B[?25hdone\n", + " Created wheel for iopath: filename=iopath-0.1.10-py3-none-any.whl size=31531 sha256=db977a4344bebbdd710665e767caab4fbcf53cc6aea0707cd38d26c45718331e\n", + " Stored in directory: /root/.cache/pip/wheels/9a/a3/b6/ac0fcd1b4ed5cfeb3db92e6a0e476cfd48ed0df92b91080c1d\n", + " Building wheel for pathtools (setup.py) ... 
\u001B[?25l\u001B[?25hdone\n", + " Created wheel for pathtools: filename=pathtools-0.1.2-py3-none-any.whl size=8791 sha256=08bb5753ce029aef01f25c3e81882d93c0e040e5932e90a02a062ad058756b52\n", + " Stored in directory: /root/.cache/pip/wheels/e7/f3/22/152153d6eb222ee7a56ff8617d80ee5207207a8c00a7aab794\n", + "Successfully built fvcore pims PyTurboJPEG iopath pathtools\n", + "Installing collected packages: slicerator, pathtools, lmdb, av, yacs, smmap, setproctitle, sentry-sdk, PyTurboJPEG, portalocker, docker-pycreds, pims, iopath, gitdb, GitPython, fvcore, wandb\n", + "Successfully installed GitPython-3.1.31 PyTurboJPEG-1.7.1 av-10.0.0 docker-pycreds-0.4.0 fvcore-0.1.5.post20221221 gitdb-4.0.10 iopath-0.1.10 lmdb-1.4.1 pathtools-0.1.2 pims-0.6.1 portalocker-2.7.0 sentry-sdk-1.22.2 setproctitle-1.3.2 slicerator-1.1.0 smmap-5.0.0 wandb-0.15.2 yacs-0.1.8\n" + ] + } + ], + "source": [ + "# install MMEngine, MMCV and MMDetection using MIM\n", + "%pip install -U openmim\n", + "!mim install mmengine\n", + "!mim install \"mmcv>=2.0.0\"\n", + "\n", + "# Install mmaction2\n", + "!rm -rf mmaction2\n", + "!git clone https://github.com/open-mmlab/mmaction2.git -b main\n", + "%cd mmaction2\n", + "\n", + "!pip install -e .\n", + "\n", + "# Install some optional requirements\n", + "!pip install -r requirements/optional.txt" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "No_zZAFpWC-a", + "outputId": "9386dd81-2308-4adb-d3cb-798de11c035e" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "QuZG8kZ2fJ5d" - }, - "source": [ - "## Train a recognizer on customized dataset\n", - "\n", - "To train a new recognizer, there are usually three things to do:\n", - "1. Support a new dataset\n", - "2. Modify the config\n", - "3. Train a new recognizer" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "2.0.0+cu118 True\n", + "1.0.0\n", + "11.8\n", + "GCC 9.3\n", + "OrderedDict([('sys.platform', 'linux'), ('Python', '3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]'), ('CUDA available', True), ('numpy_random_seed', 2147483648), ('GPU 0', 'Tesla T4'), ('CUDA_HOME', '/usr/local/cuda'), ('NVCC', 'Cuda compilation tools, release 11.8, V11.8.89'), ('GCC', 'x86_64-linux-gnu-gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0'), ('PyTorch', '2.0.0+cu118'), ('PyTorch compiling details', 'PyTorch built with:\\n - GCC 9.3\\n - C++ Version: 201703\\n - Intel(R) oneAPI Math Kernel Library Version 2022.2-Product Build 20220804 for Intel(R) 64 architecture applications\\n - Intel(R) MKL-DNN v2.7.3 (Git Hash 6dbeffbae1f23cbbeae17adb7b5b13f1f37c080e)\\n - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\\n - LAPACK is enabled (usually provided by MKL)\\n - NNPACK is enabled\\n - CPU capability usage: AVX2\\n - CUDA Runtime 11.8\\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90\\n - CuDNN 8.7\\n - Magma 2.6.1\\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.8, CUDNN_VERSION=8.7.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -D_GLIBCXX_USE_CXX11_ABI=0 -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_DISABLE_GPU_ASSERTS=ON, TORCH_VERSION=2.0.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=1, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \\n'), ('TorchVision', '0.15.1+cu118'), ('OpenCV', '4.7.0'), ('MMEngine', '0.7.3')])\n" + ] + } + ], + "source": [ + "# Check Pytorch installation\n", + "import torch, torchvision\n", + "print(torch.__version__, torch.cuda.is_available())\n", + "\n", + "# Check MMAction2 installation\n", + "import mmaction\n", + "print(mmaction.__version__)\n", + "\n", + "# Check MMCV installation\n", + "from mmcv.ops import get_compiling_cuda_version, get_compiler_version\n", + "print(get_compiling_cuda_version())\n", + "print(get_compiler_version())\n", + "\n", + "# Check MMEngine installation\n", + "from mmengine.utils.dl_utils import collect_env\n", + "print(collect_env())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pXf7oV5DWdab" + }, + "source": [ + "## Perform inference with a MMAction2 recognizer\n", + "MMAction2 already provides high level APIs to do inference and training." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "64CW6d_AaT-Q", + "outputId": "ea330d8c-2e20-4dbd-d046-51d7c9ec4f7a" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "neEFyxChfgiJ" - }, - "source": [ - "### Support a new dataset\n", - "\n", - "In this tutorial, we gives an example to convert the data into the format of existing datasets. Other methods and more advanced usages can be found in the [doc](/docs/tutorials/new_dataset.md)\n", - "\n", - "Firstly, let's download a tiny dataset obtained from [Kinetics-400](https://deepmind.com/research/open-source/open-source-datasets/kinetics/). 
We select 30 videos with their labels as train dataset and 10 videos with their labels as test dataset." - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "--2023-05-15 03:33:08-- https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth\n", + "Resolving download.openmmlab.com (download.openmmlab.com)... 163.181.82.216, 163.181.82.218, 163.181.82.213, ...\n", + "Connecting to download.openmmlab.com (download.openmmlab.com)|163.181.82.216|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 97579339 (93M) [application/octet-stream]\n", + "Saving to: โ€˜checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pthโ€™\n", + "\n", + "checkpoints/tsn_r50 100%[===================>] 93.06M 26.1MB/s in 3.6s \n", + "\n", + "2023-05-15 03:33:12 (26.2 MB/s) - โ€˜checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pthโ€™ saved [97579339/97579339]\n", + "\n" + ] + } + ], + "source": [ + "!mkdir checkpoints\n", + "!wget -c https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \\\n", + " -O checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "HNZB7NoSabzj", + "outputId": "c0c2ba71-72ff-4cac-a5b8-65590f5a6bb0" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "gjsUj9JzgUlJ", - "outputId": "96a0e6e9-0dd8-4c07-9fed-22b93d5c1318" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "rm: cannot remove 'kinetics400_tiny.zip*': No such file or directory\n", - "--2023-05-15 03:33:27-- https://download.openmmlab.com/mmaction/kinetics400_tiny.zip\n", - "Resolving download.openmmlab.com (download.openmmlab.com)... 163.181.82.216, 163.181.82.218, 163.181.82.213, ...\n", - "Connecting to download.openmmlab.com (download.openmmlab.com)|163.181.82.216|:443... connected.\n", - "HTTP request sent, awaiting response... 
200 OK\n", - "Length: 18308682 (17M) [application/zip]\n", - "Saving to: โ€˜kinetics400_tiny.zipโ€™\n", - "\n", - "kinetics400_tiny.zi 100%[===================>] 17.46M 32.7MB/s in 0.5s \n", - "\n", - "2023-05-15 03:33:28 (32.7 MB/s) - โ€˜kinetics400_tiny.zipโ€™ saved [18308682/18308682]\n", - "\n" - ] - } - ], - "source": [ - "# download, decompress the data\n", - "!rm kinetics400_tiny.zip*\n", - "!rm -rf kinetics400_tiny\n", - "!wget https://download.openmmlab.com/mmaction/kinetics400_tiny.zip\n", - "!unzip kinetics400_tiny.zip > /dev/null" - ] - }, + "output_type": "stream", + "name": "stdout", + "text": [ + "Loads checkpoint by local backend from path: checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth\n" + ] + } + ], + "source": [ + "from mmaction.apis import inference_recognizer, init_recognizer\n", + "from mmengine import Config\n", + "\n", + "\n", + "# Choose to use a config and initialize the recognizer\n", + "config = 'configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py'\n", + "config = Config.fromfile(config)\n", + "# Setup a checkpoint file to load\n", + "checkpoint = 'checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth'\n", + "# Initialize the recognizer\n", + "model = init_recognizer(config, checkpoint, device='cuda:0')" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "rEMsBnpHapAn", + "outputId": "ec05049e-7289-4798-94fa-2b773cb23634", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "AbZ-o7V6hNw4", - "outputId": "f229f352-1b43-41b7-a374-21404f618581" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Reading package lists...\n", - "Building dependency tree...\n", - "Reading state information...\n", - "The following NEW packages will be installed:\n", - " tree\n", - "0 upgraded, 1 newly installed, 0 to remove and 24 not upgraded.\n", - "Need to get 43.0 kB of archives.\n", - "After this operation, 115 kB of additional disk space will be used.\n", - "Get:1 http://archive.ubuntu.com/ubuntu focal/universe amd64 tree amd64 1.8.0-1 [43.0 kB]\n", - "Fetched 43.0 kB in 1s (48.9 kB/s)\n", - "Selecting previously unselected package tree.\n", - "(Reading database ... 
122519 files and directories currently installed.)\n", - "Preparing to unpack .../tree_1.8.0-1_amd64.deb ...\n", - "Unpacking tree (1.8.0-1) ...\n", - "Setting up tree (1.8.0-1) ...\n", - "Processing triggers for man-db (2.9.1-1) ...\n", - "\u001b[01;34mkinetics400_tiny\u001b[00m\n", - "โ”œโ”€โ”€ kinetics_tiny_train_video.txt\n", - "โ”œโ”€โ”€ kinetics_tiny_val_video.txt\n", - "โ”œโ”€โ”€ \u001b[01;34mtrain\u001b[00m\n", - "โ”‚ย ย  โ”œโ”€โ”€ 27_CSXByd3s.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ 34XczvTaRiI.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ A-wiliK50Zw.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ D32_1gwq35E.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ D92m0HsHjcQ.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ DbX8mPslRXg.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ FMlSTTpN3VY.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ h10B9SVE-nk.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ h2YqqUhnR34.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ iRuyZSKhHRg.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ IyfILH9lBRo.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ kFC3KY2bOP8.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ LvcFDgCAXQs.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ O46YA8tI530.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ oMrZaozOvdQ.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ oXy-e_P_cAI.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ P5M-hAts7MQ.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ phDqGd0NKoo.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ PnOe3GZRVX8.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ R8HXQkdgKWA.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ RqnKtCEoEcA.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ soEcZZsBmDs.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ TkkZPZHbAKA.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ T_TMNGzVrDk.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ WaS0qwP46Us.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ Wh_YPQdH1Zg.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ WWP5HZJsg-o.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ xGY2dP0YUjA.mp4\n", - "โ”‚ย ย  โ”œโ”€โ”€ yLC9CtWU5ws.mp4\n", - "โ”‚ย ย  โ””โ”€โ”€ ZQV4U2KQ370.mp4\n", - "โ””โ”€โ”€ \u001b[01;34mval\u001b[00m\n", - " โ”œโ”€โ”€ 0pVGiAU6XEA.mp4\n", - " โ”œโ”€โ”€ AQrbRSnRt8M.mp4\n", - " โ”œโ”€โ”€ b6Q_b7vgc7Q.mp4\n", - " โ”œโ”€โ”€ ddvJ6-faICE.mp4\n", - " โ”œโ”€โ”€ IcLztCtvhb8.mp4\n", - " โ”œโ”€โ”€ ik4BW3-SCts.mp4\n", - " โ”œโ”€โ”€ jqRrH30V0k4.mp4\n", - " โ”œโ”€โ”€ SU_x2LQqSLs.mp4\n", - " โ”œโ”€โ”€ u4Rm6srmIS8.mp4\n", - " โ””โ”€โ”€ y5Iu7XkTqV0.mp4\n", - "\n", - "2 directories, 42 files\n" - ] - } - ], - "source": [ - "# Check the directory structure of the tiny data\n", - "\n", - "# Install tree first\n", - "!apt-get -q install tree\n", - "!tree kinetics400_tiny" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "05/15 03:33:18 - mmengine - WARNING - \"FileClient\" will be deprecated in future. 
Please use io functions in https://mmengine.readthedocs.io/en/latest/api/fileio.html#file-io\n", + "05/15 03:33:18 - mmengine - WARNING - \"HardDiskBackend\" is the alias of \"LocalBackend\" and the former will be deprecated in future.\n" + ] + } + ], + "source": [ + "# Use the recognizer to do inference\n", + "from operator import itemgetter\n", + "video = 'demo/demo.mp4'\n", + "label = 'tools/data/kinetics/label_map_k400.txt'\n", + "results = inference_recognizer(model, video)\n", + "\n", + "pred_scores = results.pred_score.tolist()\n", + "score_tuples = tuple(zip(range(len(pred_scores)), pred_scores))\n", + "score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True)\n", + "top5_label = score_sorted[:5]\n", + "\n", + "labels = open(label).readlines()\n", + "labels = [x.strip() for x in labels]\n", + "results = [(labels[k[0]], k[1]) for k in top5_label]\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "NIyJXqfWathq", + "outputId": "cb25aca9-e72d-4c54-f295-4c889713cb3a" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 12, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "fTdi6dI0hY3g", - "outputId": "95f22438-566c-4496-fe0c-50e128b47b5e" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "D32_1gwq35E.mp4 0\n", - "iRuyZSKhHRg.mp4 1\n", - "oXy-e_P_cAI.mp4 0\n", - "34XczvTaRiI.mp4 1\n", - "h2YqqUhnR34.mp4 0\n", - "O46YA8tI530.mp4 0\n", - "kFC3KY2bOP8.mp4 1\n", - "WWP5HZJsg-o.mp4 1\n", - "phDqGd0NKoo.mp4 1\n", - "yLC9CtWU5ws.mp4 0\n", - "27_CSXByd3s.mp4 1\n", - "IyfILH9lBRo.mp4 1\n", - "T_TMNGzVrDk.mp4 1\n", - "TkkZPZHbAKA.mp4 0\n", - "PnOe3GZRVX8.mp4 1\n", - "soEcZZsBmDs.mp4 1\n", - "FMlSTTpN3VY.mp4 1\n", - "WaS0qwP46Us.mp4 0\n", - "A-wiliK50Zw.mp4 1\n", - "oMrZaozOvdQ.mp4 1\n", - "ZQV4U2KQ370.mp4 0\n", - "DbX8mPslRXg.mp4 1\n", - "h10B9SVE-nk.mp4 1\n", - "P5M-hAts7MQ.mp4 0\n", - "R8HXQkdgKWA.mp4 0\n", - "D92m0HsHjcQ.mp4 0\n", - "RqnKtCEoEcA.mp4 0\n", - "LvcFDgCAXQs.mp4 0\n", - "xGY2dP0YUjA.mp4 0\n", - "Wh_YPQdH1Zg.mp4 0\n" - ] - } - ], - "source": [ - "# After downloading the data, we need to check the annotation format\n", - "!cat kinetics400_tiny/kinetics_tiny_train_video.txt" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "The top-5 labels with corresponding scores are:\n", + "arm wrestling: 1.0\n", + "rock scissors paper: 6.434453414527752e-09\n", + "shaking hands: 2.7599860175087088e-09\n", + "clapping: 1.3454612979302283e-09\n", + "massaging feet: 5.555100823784187e-10\n" + ] + } + ], + "source": [ + "print('The top-5 labels with corresponding scores are:')\n", + "for result in results:\n", + " print(f'{result[0]}: ', result[1])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QuZG8kZ2fJ5d" + }, + "source": [ + "## Train a recognizer on customized dataset\n", + "\n", + "To train a new recognizer, there are usually three things to do:\n", + "1. Support a new dataset\n", + "2. Modify the config\n", + "3. Train a new recognizer" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "neEFyxChfgiJ" + }, + "source": [ + "### Support a new dataset\n", + "\n", + "In this tutorial, we gives an example to convert the data into the format of existing datasets. 
Other methods and more advanced usages can be found in the [doc](/docs/tutorials/new_dataset.md)\n", + "\n", + "Firstly, let's download a tiny dataset obtained from [Kinetics-400](https://deepmind.com/research/open-source/open-source-datasets/kinetics/). We select 30 videos with their labels as train dataset and 10 videos with their labels as test dataset." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "gjsUj9JzgUlJ", + "outputId": "96a0e6e9-0dd8-4c07-9fed-22b93d5c1318" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "0bq0mxmEi29H" - }, - "source": [ - "According to the format defined in [`VideoDataset`](./datasets/video_dataset.py), each line indicates a sample video with the filepath and label, which are split with a whitespace." - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "rm: cannot remove 'kinetics400_tiny.zip*': No such file or directory\n", + "--2023-05-15 03:33:27-- https://download.openmmlab.com/mmaction/kinetics400_tiny.zip\n", + "Resolving download.openmmlab.com (download.openmmlab.com)... 163.181.82.216, 163.181.82.218, 163.181.82.213, ...\n", + "Connecting to download.openmmlab.com (download.openmmlab.com)|163.181.82.216|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 18308682 (17M) [application/zip]\n", + "Saving to: โ€˜kinetics400_tiny.zipโ€™\n", + "\n", + "kinetics400_tiny.zi 100%[===================>] 17.46M 32.7MB/s in 0.5s \n", + "\n", + "2023-05-15 03:33:28 (32.7 MB/s) - โ€˜kinetics400_tiny.zipโ€™ saved [18308682/18308682]\n", + "\n" + ] + } + ], + "source": [ + "# download, decompress the data\n", + "!rm kinetics400_tiny.zip*\n", + "!rm -rf kinetics400_tiny\n", + "!wget https://download.openmmlab.com/mmaction/kinetics400_tiny.zip\n", + "!unzip kinetics400_tiny.zip > /dev/null" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "AbZ-o7V6hNw4", + "outputId": "f229f352-1b43-41b7-a374-21404f618581" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "Ht_DGJA9jQar" - }, - "source": [ - "### Modify the config\n", - "\n", - "In the next step, we need to modify the config for the training.\n", - "To accelerate the process, we finetune a recognizer using a pre-trained recognizer." - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "Reading package lists...\n", + "Building dependency tree...\n", + "Reading state information...\n", + "The following NEW packages will be installed:\n", + " tree\n", + "0 upgraded, 1 newly installed, 0 to remove and 24 not upgraded.\n", + "Need to get 43.0 kB of archives.\n", + "After this operation, 115 kB of additional disk space will be used.\n", + "Get:1 http://archive.ubuntu.com/ubuntu focal/universe amd64 tree amd64 1.8.0-1 [43.0 kB]\n", + "Fetched 43.0 kB in 1s (48.9 kB/s)\n", + "Selecting previously unselected package tree.\n", + "(Reading database ... 
122519 files and directories currently installed.)\n", + "Preparing to unpack .../tree_1.8.0-1_amd64.deb ...\n", + "Unpacking tree (1.8.0-1) ...\n", + "Setting up tree (1.8.0-1) ...\n", + "Processing triggers for man-db (2.9.1-1) ...\n", + "\u001B[01;34mkinetics400_tiny\u001B[00m\n", + "โ”œโ”€โ”€ kinetics_tiny_train_video.txt\n", + "โ”œโ”€โ”€ kinetics_tiny_val_video.txt\n", + "โ”œโ”€โ”€ \u001B[01;34mtrain\u001B[00m\n", + "โ”‚ย ย  โ”œโ”€โ”€ 27_CSXByd3s.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ 34XczvTaRiI.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ A-wiliK50Zw.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ D32_1gwq35E.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ D92m0HsHjcQ.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ DbX8mPslRXg.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ FMlSTTpN3VY.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ h10B9SVE-nk.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ h2YqqUhnR34.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ iRuyZSKhHRg.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ IyfILH9lBRo.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ kFC3KY2bOP8.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ LvcFDgCAXQs.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ O46YA8tI530.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ oMrZaozOvdQ.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ oXy-e_P_cAI.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ P5M-hAts7MQ.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ phDqGd0NKoo.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ PnOe3GZRVX8.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ R8HXQkdgKWA.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ RqnKtCEoEcA.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ soEcZZsBmDs.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ TkkZPZHbAKA.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ T_TMNGzVrDk.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ WaS0qwP46Us.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ Wh_YPQdH1Zg.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ WWP5HZJsg-o.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ xGY2dP0YUjA.mp4\n", + "โ”‚ย ย  โ”œโ”€โ”€ yLC9CtWU5ws.mp4\n", + "โ”‚ย ย  โ””โ”€โ”€ ZQV4U2KQ370.mp4\n", + "โ””โ”€โ”€ \u001B[01;34mval\u001B[00m\n", + " โ”œโ”€โ”€ 0pVGiAU6XEA.mp4\n", + " โ”œโ”€โ”€ AQrbRSnRt8M.mp4\n", + " โ”œโ”€โ”€ b6Q_b7vgc7Q.mp4\n", + " โ”œโ”€โ”€ ddvJ6-faICE.mp4\n", + " โ”œโ”€โ”€ IcLztCtvhb8.mp4\n", + " โ”œโ”€โ”€ ik4BW3-SCts.mp4\n", + " โ”œโ”€โ”€ jqRrH30V0k4.mp4\n", + " โ”œโ”€โ”€ SU_x2LQqSLs.mp4\n", + " โ”œโ”€โ”€ u4Rm6srmIS8.mp4\n", + " โ””โ”€โ”€ y5Iu7XkTqV0.mp4\n", + "\n", + "2 directories, 42 files\n" + ] + } + ], + "source": [ + "# Check the directory structure of the tiny data\n", + "\n", + "# Install tree first\n", + "!apt-get -q install tree\n", + "!tree kinetics400_tiny" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "fTdi6dI0hY3g", + "outputId": "95f22438-566c-4496-fe0c-50e128b47b5e" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 13, - "metadata": { - "id": "LjCcmCKOjktc" - }, - "outputs": [], - "source": [ - "cfg = Config.fromfile('./configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py')" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "D32_1gwq35E.mp4 0\n", + "iRuyZSKhHRg.mp4 1\n", + "oXy-e_P_cAI.mp4 0\n", + "34XczvTaRiI.mp4 1\n", + "h2YqqUhnR34.mp4 0\n", + "O46YA8tI530.mp4 0\n", + "kFC3KY2bOP8.mp4 1\n", + "WWP5HZJsg-o.mp4 1\n", + "phDqGd0NKoo.mp4 1\n", + "yLC9CtWU5ws.mp4 0\n", + "27_CSXByd3s.mp4 1\n", + "IyfILH9lBRo.mp4 1\n", + "T_TMNGzVrDk.mp4 1\n", + "TkkZPZHbAKA.mp4 0\n", + "PnOe3GZRVX8.mp4 1\n", + "soEcZZsBmDs.mp4 1\n", + "FMlSTTpN3VY.mp4 1\n", + "WaS0qwP46Us.mp4 0\n", + "A-wiliK50Zw.mp4 1\n", + "oMrZaozOvdQ.mp4 1\n", + "ZQV4U2KQ370.mp4 0\n", + "DbX8mPslRXg.mp4 1\n", + "h10B9SVE-nk.mp4 1\n", + "P5M-hAts7MQ.mp4 0\n", + "R8HXQkdgKWA.mp4 0\n", + "D92m0HsHjcQ.mp4 0\n", + "RqnKtCEoEcA.mp4 0\n", + "LvcFDgCAXQs.mp4 0\n", + "xGY2dP0YUjA.mp4 0\n", + "Wh_YPQdH1Zg.mp4 0\n" + ] + } + ], + 
"source": [ + "# After downloading the data, we need to check the annotation format\n", + "!cat kinetics400_tiny/kinetics_tiny_train_video.txt" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0bq0mxmEi29H" + }, + "source": [ + "According to the format defined in [`VideoDataset`](./datasets/video_dataset.py), each line indicates a sample video with the filepath and label, which are split with a whitespace." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ht_DGJA9jQar" + }, + "source": [ + "### Modify the config\n", + "\n", + "In the next step, we need to modify the config for the training.\n", + "To accelerate the process, we finetune a recognizer using a pre-trained recognizer." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "LjCcmCKOjktc" + }, + "outputs": [], + "source": [ + "cfg = Config.fromfile('./configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tc8YhFFGjp3e" + }, + "source": [ + "Given a config that trains a TSN model on kinetics400-full dataset, we need to modify some values to use it for training TSN on Kinetics400-tiny dataset.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "tlhu9byjjt-K", + "outputId": "2d984a1d-93f7-493f-fd77-e19af8285f38" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "tc8YhFFGjp3e" - }, - "source": [ - "Given a config that trains a TSN model on kinetics400-full dataset, we need to modify some values to use it for training TSN on Kinetics400-tiny dataset.\n" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "Config:\n", + "model = dict(\n", + " type='Recognizer2D',\n", + " backbone=dict(\n", + " type='ResNet',\n", + " pretrained='https://download.pytorch.org/models/resnet50-11ad3fa6.pth',\n", + " depth=50,\n", + " norm_eval=False),\n", + " cls_head=dict(\n", + " type='TSNHead',\n", + " num_classes=2,\n", + " in_channels=2048,\n", + " spatial_type='avg',\n", + " consensus=dict(type='AvgConsensus', dim=1),\n", + " dropout_ratio=0.4,\n", + " init_std=0.01,\n", + " average_clips='prob'),\n", + " data_preprocessor=dict(\n", + " type='ActionDataPreprocessor',\n", + " mean=[123.675, 116.28, 103.53],\n", + " std=[58.395, 57.12, 57.375],\n", + " format_shape='NCHW'),\n", + " train_cfg=None,\n", + " test_cfg=None)\n", + "train_cfg = dict(\n", + " type='EpochBasedTrainLoop', max_epochs=10, val_begin=1, val_interval=1)\n", + "val_cfg = dict(type='ValLoop')\n", + "test_cfg = dict(type='TestLoop')\n", + "param_scheduler = [\n", + " dict(\n", + " type='MultiStepLR',\n", + " begin=0,\n", + " end=100,\n", + " by_epoch=True,\n", + " milestones=[40, 80],\n", + " gamma=0.1)\n", + "]\n", + "optim_wrapper = dict(\n", + " optimizer=dict(\n", + " type='SGD', lr=7.8125e-05, momentum=0.9, weight_decay=0.0001),\n", + " clip_grad=dict(max_norm=40, norm_type=2))\n", + "default_scope = 'mmaction'\n", + "default_hooks = dict(\n", + " runtime_info=dict(type='RuntimeInfoHook'),\n", + " timer=dict(type='IterTimerHook'),\n", + " logger=dict(type='LoggerHook', interval=20, ignore_last=False),\n", + " param_scheduler=dict(type='ParamSchedulerHook'),\n", + " checkpoint=dict(\n", + " type='CheckpointHook', interval=3, save_best='auto', max_keep_ckpts=3),\n", + " sampler_seed=dict(type='DistSamplerSeedHook'),\n", + " sync_buffers=dict(type='SyncBuffersHook'))\n", + "env_cfg = dict(\n", + " 
cudnn_benchmark=False,\n", + " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", + " dist_cfg=dict(backend='nccl'))\n", + "log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True)\n", + "vis_backends = [dict(type='LocalVisBackend')]\n", + "visualizer = dict(\n", + " type='ActionVisualizer', vis_backends=[dict(type='LocalVisBackend')])\n", + "log_level = 'INFO'\n", + "load_from = './checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth'\n", + "resume = False\n", + "dataset_type = 'VideoDataset'\n", + "data_root = 'kinetics400_tiny/train/'\n", + "data_root_val = 'kinetics400_tiny/val/'\n", + "ann_file_train = 'kinetics400_tiny/kinetics_tiny_train_video.txt'\n", + "ann_file_val = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", + "file_client_args = dict(io_backend='disk')\n", + "train_pipeline = [\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(\n", + " type='MultiScaleCrop',\n", + " input_size=224,\n", + " scales=(1, 0.875, 0.75, 0.66),\n", + " random_crop=False,\n", + " max_wh_scale_gap=1),\n", + " dict(type='Resize', scale=(224, 224), keep_ratio=False),\n", + " dict(type='Flip', flip_ratio=0.5),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + "]\n", + "val_pipeline = [\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames',\n", + " clip_len=1,\n", + " frame_interval=1,\n", + " num_clips=3,\n", + " test_mode=True),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + "]\n", + "test_pipeline = [\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames',\n", + " clip_len=1,\n", + " frame_interval=1,\n", + " num_clips=25,\n", + " test_mode=True),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(type='TenCrop', crop_size=224),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + "]\n", + "train_dataloader = dict(\n", + " batch_size=2,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=True),\n", + " dataset=dict(\n", + " type='VideoDataset',\n", + " ann_file='kinetics400_tiny/kinetics_tiny_train_video.txt',\n", + " data_prefix=dict(video='kinetics400_tiny/train/'),\n", + " pipeline=[\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames', clip_len=1, frame_interval=1,\n", + " num_clips=3),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(\n", + " type='MultiScaleCrop',\n", + " input_size=224,\n", + " scales=(1, 0.875, 0.75, 0.66),\n", + " random_crop=False,\n", + " max_wh_scale_gap=1),\n", + " dict(type='Resize', scale=(224, 224), keep_ratio=False),\n", + " dict(type='Flip', flip_ratio=0.5),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + " ]))\n", + "val_dataloader = dict(\n", + " batch_size=2,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=False),\n", + " dataset=dict(\n", + " type='VideoDataset',\n", + " 
ann_file='kinetics400_tiny/kinetics_tiny_val_video.txt',\n", + " data_prefix=dict(video='kinetics400_tiny/val/'),\n", + " pipeline=[\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames',\n", + " clip_len=1,\n", + " frame_interval=1,\n", + " num_clips=3,\n", + " test_mode=True),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + " ],\n", + " test_mode=True))\n", + "test_dataloader = dict(\n", + " batch_size=1,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=False),\n", + " dataset=dict(\n", + " type='VideoDataset',\n", + " ann_file='kinetics400_tiny/kinetics_tiny_val_video.txt',\n", + " data_prefix=dict(video='kinetics400_tiny/val/'),\n", + " pipeline=[\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames',\n", + " clip_len=1,\n", + " frame_interval=1,\n", + " num_clips=25,\n", + " test_mode=True),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(type='TenCrop', crop_size=224),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + " ],\n", + " test_mode=True))\n", + "val_evaluator = dict(type='AccMetric')\n", + "test_evaluator = dict(type='AccMetric')\n", + "auto_scale_lr = dict(enable=False, base_batch_size=256)\n", + "work_dir = './tutorial_exps'\n", + "\n" + ] + } + ], + "source": [ + "from mmengine.runner import set_random_seed\n", + "\n", + "# Modify dataset type and path\n", + "cfg.data_root = 'kinetics400_tiny/train/'\n", + "cfg.data_root_val = 'kinetics400_tiny/val/'\n", + "cfg.ann_file_train = 'kinetics400_tiny/kinetics_tiny_train_video.txt'\n", + "cfg.ann_file_val = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", + "\n", + "\n", + "cfg.test_dataloader.dataset.ann_file = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", + "cfg.test_dataloader.dataset.data_prefix.video = 'kinetics400_tiny/val/'\n", + "\n", + "cfg.train_dataloader.dataset.ann_file = 'kinetics400_tiny/kinetics_tiny_train_video.txt'\n", + "cfg.train_dataloader.dataset.data_prefix.video = 'kinetics400_tiny/train/'\n", + "\n", + "cfg.val_dataloader.dataset.ann_file = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", + "cfg.val_dataloader.dataset.data_prefix.video = 'kinetics400_tiny/val/'\n", + "\n", + "\n", + "# Modify num classes of the model in cls_head\n", + "cfg.model.cls_head.num_classes = 2\n", + "# We can use the pre-trained TSN model\n", + "cfg.load_from = './checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth'\n", + "\n", + "# Set up working dir to save files and logs.\n", + "cfg.work_dir = './tutorial_exps'\n", + "\n", + "# The original learning rate (LR) is set for 8-GPU training.\n", + "# We divide it by 8 since we only use one GPU.\n", + "cfg.train_dataloader.batch_size = cfg.train_dataloader.batch_size // 16\n", + "cfg.val_dataloader.batch_size = cfg.val_dataloader.batch_size // 16\n", + "cfg.optim_wrapper.optimizer.lr = cfg.optim_wrapper.optimizer.lr / 8 / 16\n", + "cfg.train_cfg.max_epochs = 10\n", + "\n", + "cfg.train_dataloader.num_workers = 2\n", + "cfg.val_dataloader.num_workers = 2\n", + "cfg.test_dataloader.num_workers = 2\n", + "\n", + "# We can initialize the logger for training and have a look\n", + "# at the final config used for training\n", + 
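+        "# Sanity check on the scaling above: the base config targets 8 GPUs x 32 videos/GPU\n",
+        "# (global batch 256) with lr=0.01, so a single GPU with batch_size=2 should use\n",
+        "# lr = 0.01 * 2 / 256 = 7.8125e-05, which is the value printed below.\n",
+        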
"print(f'Config:\\n{cfg.pretty_text}')\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tES-qnZ3k38Z" + }, + "source": [ + "### Train a new recognizer\n", + "\n", + "Finally, lets initialize the dataset and recognizer, then train a new recognizer!" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "dDBWkdDRk6oz", + "outputId": "044b9e09-2038-41c9-d5a3-8a74ae11ade2" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": 14, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "tlhu9byjjt-K", - "outputId": "2d984a1d-93f7-493f-fd77-e19af8285f38" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Config:\n", - "model = dict(\n", - " type='Recognizer2D',\n", - " backbone=dict(\n", - " type='ResNet',\n", - " pretrained='https://download.pytorch.org/models/resnet50-11ad3fa6.pth',\n", - " depth=50,\n", - " norm_eval=False),\n", - " cls_head=dict(\n", - " type='TSNHead',\n", - " num_classes=2,\n", - " in_channels=2048,\n", - " spatial_type='avg',\n", - " consensus=dict(type='AvgConsensus', dim=1),\n", - " dropout_ratio=0.4,\n", - " init_std=0.01,\n", - " average_clips='prob'),\n", - " data_preprocessor=dict(\n", - " type='ActionDataPreprocessor',\n", - " mean=[123.675, 116.28, 103.53],\n", - " std=[58.395, 57.12, 57.375],\n", - " format_shape='NCHW'),\n", - " train_cfg=None,\n", - " test_cfg=None)\n", - "train_cfg = dict(\n", - " type='EpochBasedTrainLoop', max_epochs=10, val_begin=1, val_interval=1)\n", - "val_cfg = dict(type='ValLoop')\n", - "test_cfg = dict(type='TestLoop')\n", - "param_scheduler = [\n", - " dict(\n", - " type='MultiStepLR',\n", - " begin=0,\n", - " end=100,\n", - " by_epoch=True,\n", - " milestones=[40, 80],\n", - " gamma=0.1)\n", - "]\n", - "optim_wrapper = dict(\n", - " optimizer=dict(\n", - " type='SGD', lr=7.8125e-05, momentum=0.9, weight_decay=0.0001),\n", - " clip_grad=dict(max_norm=40, norm_type=2))\n", - "default_scope = 'mmaction'\n", - "default_hooks = dict(\n", - " runtime_info=dict(type='RuntimeInfoHook'),\n", - " timer=dict(type='IterTimerHook'),\n", - " logger=dict(type='LoggerHook', interval=20, ignore_last=False),\n", - " param_scheduler=dict(type='ParamSchedulerHook'),\n", - " checkpoint=dict(\n", - " type='CheckpointHook', interval=3, save_best='auto', max_keep_ckpts=3),\n", - " sampler_seed=dict(type='DistSamplerSeedHook'),\n", - " sync_buffers=dict(type='SyncBuffersHook'))\n", - "env_cfg = dict(\n", - " cudnn_benchmark=False,\n", - " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", - " dist_cfg=dict(backend='nccl'))\n", - "log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True)\n", - "vis_backends = [dict(type='LocalVisBackend')]\n", - "visualizer = dict(\n", - " type='ActionVisualizer', vis_backends=[dict(type='LocalVisBackend')])\n", - "log_level = 'INFO'\n", - "load_from = './checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth'\n", - "resume = False\n", - "dataset_type = 'VideoDataset'\n", - "data_root = 'kinetics400_tiny/train/'\n", - "data_root_val = 'kinetics400_tiny/val/'\n", - "ann_file_train = 'kinetics400_tiny/kinetics_tiny_train_video.txt'\n", - "ann_file_val = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", - "file_client_args = dict(io_backend='disk')\n", - "train_pipeline = [\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),\n", - " 
dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(\n", - " type='MultiScaleCrop',\n", - " input_size=224,\n", - " scales=(1, 0.875, 0.75, 0.66),\n", - " random_crop=False,\n", - " max_wh_scale_gap=1),\n", - " dict(type='Resize', scale=(224, 224), keep_ratio=False),\n", - " dict(type='Flip', flip_ratio=0.5),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - "]\n", - "val_pipeline = [\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " type='SampleFrames',\n", - " clip_len=1,\n", - " frame_interval=1,\n", - " num_clips=3,\n", - " test_mode=True),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(type='CenterCrop', crop_size=224),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - "]\n", - "test_pipeline = [\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " type='SampleFrames',\n", - " clip_len=1,\n", - " frame_interval=1,\n", - " num_clips=25,\n", - " test_mode=True),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(type='TenCrop', crop_size=224),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - "]\n", - "train_dataloader = dict(\n", - " batch_size=2,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " sampler=dict(type='DefaultSampler', shuffle=True),\n", - " dataset=dict(\n", - " type='VideoDataset',\n", - " ann_file='kinetics400_tiny/kinetics_tiny_train_video.txt',\n", - " data_prefix=dict(video='kinetics400_tiny/train/'),\n", - " pipeline=[\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " type='SampleFrames', clip_len=1, frame_interval=1,\n", - " num_clips=3),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(\n", - " type='MultiScaleCrop',\n", - " input_size=224,\n", - " scales=(1, 0.875, 0.75, 0.66),\n", - " random_crop=False,\n", - " max_wh_scale_gap=1),\n", - " dict(type='Resize', scale=(224, 224), keep_ratio=False),\n", - " dict(type='Flip', flip_ratio=0.5),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - " ]))\n", - "val_dataloader = dict(\n", - " batch_size=2,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " sampler=dict(type='DefaultSampler', shuffle=False),\n", - " dataset=dict(\n", - " type='VideoDataset',\n", - " ann_file='kinetics400_tiny/kinetics_tiny_val_video.txt',\n", - " data_prefix=dict(video='kinetics400_tiny/val/'),\n", - " pipeline=[\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " type='SampleFrames',\n", - " clip_len=1,\n", - " frame_interval=1,\n", - " num_clips=3,\n", - " test_mode=True),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(type='CenterCrop', crop_size=224),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - " ],\n", - " test_mode=True))\n", - "test_dataloader = dict(\n", - " batch_size=1,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " sampler=dict(type='DefaultSampler', shuffle=False),\n", - " dataset=dict(\n", - " type='VideoDataset',\n", - " ann_file='kinetics400_tiny/kinetics_tiny_val_video.txt',\n", - " data_prefix=dict(video='kinetics400_tiny/val/'),\n", - " pipeline=[\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " 
type='SampleFrames',\n", - " clip_len=1,\n", - " frame_interval=1,\n", - " num_clips=25,\n", - " test_mode=True),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(type='TenCrop', crop_size=224),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - " ],\n", - " test_mode=True))\n", - "val_evaluator = dict(type='AccMetric')\n", - "test_evaluator = dict(type='AccMetric')\n", - "auto_scale_lr = dict(enable=False, base_batch_size=256)\n", - "work_dir = './tutorial_exps'\n", - "\n" - ] - } - ], - "source": [ - "from mmengine.runner import set_random_seed\n", - "\n", - "# Modify dataset type and path\n", - "cfg.data_root = 'kinetics400_tiny/train/'\n", - "cfg.data_root_val = 'kinetics400_tiny/val/'\n", - "cfg.ann_file_train = 'kinetics400_tiny/kinetics_tiny_train_video.txt'\n", - "cfg.ann_file_val = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", - "\n", - "\n", - "cfg.test_dataloader.dataset.ann_file = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", - "cfg.test_dataloader.dataset.data_prefix.video = 'kinetics400_tiny/val/'\n", - "\n", - "cfg.train_dataloader.dataset.ann_file = 'kinetics400_tiny/kinetics_tiny_train_video.txt'\n", - "cfg.train_dataloader.dataset.data_prefix.video = 'kinetics400_tiny/train/'\n", - "\n", - "cfg.val_dataloader.dataset.ann_file = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", - "cfg.val_dataloader.dataset.data_prefix.video = 'kinetics400_tiny/val/'\n", - "\n", - "\n", - "# Modify num classes of the model in cls_head\n", - "cfg.model.cls_head.num_classes = 2\n", - "# We can use the pre-trained TSN model\n", - "cfg.load_from = './checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth'\n", - "\n", - "# Set up working dir to save files and logs.\n", - "cfg.work_dir = './tutorial_exps'\n", - "\n", - "# The original learning rate (LR) is set for 8-GPU training.\n", - "# We divide it by 8 since we only use one GPU.\n", - "cfg.train_dataloader.batch_size = cfg.train_dataloader.batch_size // 16\n", - "cfg.val_dataloader.batch_size = cfg.val_dataloader.batch_size // 16\n", - "cfg.optim_wrapper.optimizer.lr = cfg.optim_wrapper.optimizer.lr / 8 / 16\n", - "cfg.train_cfg.max_epochs = 10\n", - "\n", - "cfg.train_dataloader.num_workers = 2\n", - "cfg.val_dataloader.num_workers = 2\n", - "cfg.test_dataloader.num_workers = 2\n", - "\n", - "# We can initialize the logger for training and have a look\n", - "# at the final config used for training\n", - "print(f'Config:\\n{cfg.pretty_text}')\n" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "05/15 03:33:34 - mmengine - INFO - \n", + "------------------------------------------------------------\n", + "System environment:\n", + " sys.platform: linux\n", + " Python: 3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]\n", + " CUDA available: True\n", + " numpy_random_seed: 1853452922\n", + " GPU 0: Tesla T4\n", + " CUDA_HOME: /usr/local/cuda\n", + " NVCC: Cuda compilation tools, release 11.8, V11.8.89\n", + " GCC: x86_64-linux-gnu-gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\n", + " PyTorch: 2.0.0+cu118\n", + " PyTorch compiling details: PyTorch built with:\n", + " - GCC 9.3\n", + " - C++ Version: 201703\n", + " - Intel(R) oneAPI Math Kernel Library Version 2022.2-Product Build 20220804 for Intel(R) 64 architecture applications\n", + " - Intel(R) MKL-DNN v2.7.3 (Git Hash 6dbeffbae1f23cbbeae17adb7b5b13f1f37c080e)\n", + " - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\n", + " - LAPACK is enabled (usually provided by MKL)\n", + " - NNPACK is enabled\n", + " - CPU capability usage: AVX2\n", + " - CUDA Runtime 11.8\n", + " - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90\n", + " - CuDNN 8.7\n", + " - Magma 2.6.1\n", + " - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.8, CUDNN_VERSION=8.7.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -D_GLIBCXX_USE_CXX11_ABI=0 -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_DISABLE_GPU_ASSERTS=ON, TORCH_VERSION=2.0.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=1, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n", + "\n", + " TorchVision: 0.15.1+cu118\n", + " OpenCV: 4.7.0\n", + " MMEngine: 0.7.3\n", + "\n", + "Runtime environment:\n", + " cudnn_benchmark: False\n", + " mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}\n", + " dist_cfg: {'backend': 'nccl'}\n", + " seed: None\n", + " Distributed launcher: none\n", + " Distributed training: False\n", + " GPU number: 1\n", + "------------------------------------------------------------\n", + "\n", + "05/15 03:33:34 - mmengine - INFO - Config:\n", + "model = dict(\n", + " type='Recognizer2D',\n", + " backbone=dict(\n", + " type='ResNet',\n", + " pretrained='https://download.pytorch.org/models/resnet50-11ad3fa6.pth',\n", + " depth=50,\n", + " norm_eval=False),\n", + " cls_head=dict(\n", + " type='TSNHead',\n", + " num_classes=2,\n", + " in_channels=2048,\n", + " spatial_type='avg',\n", + " consensus=dict(type='AvgConsensus', dim=1),\n", + " dropout_ratio=0.4,\n", + " init_std=0.01,\n", + " average_clips='prob'),\n", + " data_preprocessor=dict(\n", + " type='ActionDataPreprocessor',\n", + " mean=[123.675, 116.28, 103.53],\n", + " std=[58.395, 57.12, 57.375],\n", + " format_shape='NCHW'),\n", + " train_cfg=None,\n", + " test_cfg=None)\n", + "train_cfg = dict(\n", + " type='EpochBasedTrainLoop', max_epochs=10, val_begin=1, val_interval=1)\n", + "val_cfg = dict(type='ValLoop')\n", + "test_cfg = dict(type='TestLoop')\n", + "param_scheduler = [\n", + " dict(\n", + " type='MultiStepLR',\n", + " begin=0,\n", + " end=100,\n", + " by_epoch=True,\n", + " milestones=[40, 80],\n", + " gamma=0.1)\n", + "]\n", + "optim_wrapper = dict(\n", + " optimizer=dict(\n", + " type='SGD', lr=7.8125e-05, 
momentum=0.9, weight_decay=0.0001),\n", + " clip_grad=dict(max_norm=40, norm_type=2))\n", + "default_scope = 'mmaction'\n", + "default_hooks = dict(\n", + " runtime_info=dict(type='RuntimeInfoHook'),\n", + " timer=dict(type='IterTimerHook'),\n", + " logger=dict(type='LoggerHook', interval=20, ignore_last=False),\n", + " param_scheduler=dict(type='ParamSchedulerHook'),\n", + " checkpoint=dict(\n", + " type='CheckpointHook', interval=3, save_best='auto', max_keep_ckpts=3),\n", + " sampler_seed=dict(type='DistSamplerSeedHook'),\n", + " sync_buffers=dict(type='SyncBuffersHook'))\n", + "env_cfg = dict(\n", + " cudnn_benchmark=False,\n", + " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", + " dist_cfg=dict(backend='nccl'))\n", + "log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True)\n", + "vis_backends = [dict(type='LocalVisBackend')]\n", + "visualizer = dict(\n", + " type='ActionVisualizer', vis_backends=[dict(type='LocalVisBackend')])\n", + "log_level = 'INFO'\n", + "load_from = './checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth'\n", + "resume = False\n", + "dataset_type = 'VideoDataset'\n", + "data_root = 'kinetics400_tiny/train/'\n", + "data_root_val = 'kinetics400_tiny/val/'\n", + "ann_file_train = 'kinetics400_tiny/kinetics_tiny_train_video.txt'\n", + "ann_file_val = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", + "file_client_args = dict(io_backend='disk')\n", + "train_pipeline = [\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(\n", + " type='MultiScaleCrop',\n", + " input_size=224,\n", + " scales=(1, 0.875, 0.75, 0.66),\n", + " random_crop=False,\n", + " max_wh_scale_gap=1),\n", + " dict(type='Resize', scale=(224, 224), keep_ratio=False),\n", + " dict(type='Flip', flip_ratio=0.5),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + "]\n", + "val_pipeline = [\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames',\n", + " clip_len=1,\n", + " frame_interval=1,\n", + " num_clips=3,\n", + " test_mode=True),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + "]\n", + "test_pipeline = [\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames',\n", + " clip_len=1,\n", + " frame_interval=1,\n", + " num_clips=25,\n", + " test_mode=True),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(type='TenCrop', crop_size=224),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + "]\n", + "train_dataloader = dict(\n", + " batch_size=2,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=True),\n", + " dataset=dict(\n", + " type='VideoDataset',\n", + " ann_file='kinetics400_tiny/kinetics_tiny_train_video.txt',\n", + " data_prefix=dict(video='kinetics400_tiny/train/'),\n", + " pipeline=[\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames', clip_len=1, frame_interval=1,\n", + " num_clips=3),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(\n", + " 
type='MultiScaleCrop',\n", + " input_size=224,\n", + " scales=(1, 0.875, 0.75, 0.66),\n", + " random_crop=False,\n", + " max_wh_scale_gap=1),\n", + " dict(type='Resize', scale=(224, 224), keep_ratio=False),\n", + " dict(type='Flip', flip_ratio=0.5),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + " ]))\n", + "val_dataloader = dict(\n", + " batch_size=2,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=False),\n", + " dataset=dict(\n", + " type='VideoDataset',\n", + " ann_file='kinetics400_tiny/kinetics_tiny_val_video.txt',\n", + " data_prefix=dict(video='kinetics400_tiny/val/'),\n", + " pipeline=[\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames',\n", + " clip_len=1,\n", + " frame_interval=1,\n", + " num_clips=3,\n", + " test_mode=True),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(type='CenterCrop', crop_size=224),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + " ],\n", + " test_mode=True))\n", + "test_dataloader = dict(\n", + " batch_size=1,\n", + " num_workers=2,\n", + " persistent_workers=True,\n", + " sampler=dict(type='DefaultSampler', shuffle=False),\n", + " dataset=dict(\n", + " type='VideoDataset',\n", + " ann_file='kinetics400_tiny/kinetics_tiny_val_video.txt',\n", + " data_prefix=dict(video='kinetics400_tiny/val/'),\n", + " pipeline=[\n", + " dict(type='DecordInit', io_backend='disk'),\n", + " dict(\n", + " type='SampleFrames',\n", + " clip_len=1,\n", + " frame_interval=1,\n", + " num_clips=25,\n", + " test_mode=True),\n", + " dict(type='DecordDecode'),\n", + " dict(type='Resize', scale=(-1, 256)),\n", + " dict(type='TenCrop', crop_size=224),\n", + " dict(type='FormatShape', input_format='NCHW'),\n", + " dict(type='PackActionInputs')\n", + " ],\n", + " test_mode=True))\n", + "val_evaluator = dict(type='AccMetric')\n", + "test_evaluator = dict(type='AccMetric')\n", + "auto_scale_lr = dict(enable=False, base_batch_size=256)\n", + "work_dir = './tutorial_exps'\n", + "\n", + "05/15 03:33:35 - mmengine - INFO - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.\n", + "05/15 03:33:35 - mmengine - INFO - Hooks will be executed in the following order:\n", + "before_run:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n", + "before_train:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "before_train_epoch:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(NORMAL ) DistSamplerSeedHook \n", + " -------------------- \n", + "before_train_iter:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + " -------------------- \n", + "after_train_iter:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(BELOW_NORMAL) LoggerHook \n", + "(LOW ) ParamSchedulerHook \n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "after_train_epoch:\n", + "(NORMAL ) IterTimerHook \n", + "(NORMAL ) SyncBuffersHook \n", + "(LOW ) ParamSchedulerHook \n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "before_val_epoch:\n", + "(NORMAL ) IterTimerHook \n", + "(NORMAL ) SyncBuffersHook \n", + " -------------------- \n", 
+ "before_val_iter:\n", + "(NORMAL ) IterTimerHook \n", + " -------------------- \n", + "after_val_iter:\n", + "(NORMAL ) IterTimerHook \n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n", + "after_val_epoch:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(BELOW_NORMAL) LoggerHook \n", + "(LOW ) ParamSchedulerHook \n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "after_train:\n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "before_test_epoch:\n", + "(NORMAL ) IterTimerHook \n", + " -------------------- \n", + "before_test_iter:\n", + "(NORMAL ) IterTimerHook \n", + " -------------------- \n", + "after_test_iter:\n", + "(NORMAL ) IterTimerHook \n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n", + "after_test_epoch:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n", + "after_run:\n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n", + "Loads checkpoint by http backend from path: https://download.pytorch.org/models/resnet50-11ad3fa6.pth\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "tES-qnZ3k38Z" - }, - "source": [ - "### Train a new recognizer\n", - "\n", - "Finally, lets initialize the dataset and recognizer, then train a new recognizer!" - ] + "output_type": "stream", + "name": "stderr", + "text": [ + "Downloading: \"https://download.pytorch.org/models/resnet50-11ad3fa6.pth\" to /root/.cache/torch/hub/checkpoints/resnet50-11ad3fa6.pth\n" + ] }, { - "cell_type": "code", - "execution_count": 15, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "dDBWkdDRk6oz", - "outputId": "044b9e09-2038-41c9-d5a3-8a74ae11ade2" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "05/15 03:33:34 - mmengine - INFO - \n", - "------------------------------------------------------------\n", - "System environment:\n", - " sys.platform: linux\n", - " Python: 3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]\n", - " CUDA available: True\n", - " numpy_random_seed: 1853452922\n", - " GPU 0: Tesla T4\n", - " CUDA_HOME: /usr/local/cuda\n", - " NVCC: Cuda compilation tools, release 11.8, V11.8.89\n", - " GCC: x86_64-linux-gnu-gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\n", - " PyTorch: 2.0.0+cu118\n", - " PyTorch compiling details: PyTorch built with:\n", - " - GCC 9.3\n", - " - C++ Version: 201703\n", - " - Intel(R) oneAPI Math Kernel Library Version 2022.2-Product Build 20220804 for Intel(R) 64 architecture applications\n", - " - Intel(R) MKL-DNN v2.7.3 (Git Hash 6dbeffbae1f23cbbeae17adb7b5b13f1f37c080e)\n", - " - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\n", - " - LAPACK is enabled (usually provided by MKL)\n", - " - NNPACK is enabled\n", - " - CPU capability usage: AVX2\n", - " - CUDA Runtime 11.8\n", - " - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90\n", - " - CuDNN 8.7\n", - " - Magma 2.6.1\n", - " - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.8, CUDNN_VERSION=8.7.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -D_GLIBCXX_USE_CXX11_ABI=0 -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_DISABLE_GPU_ASSERTS=ON, TORCH_VERSION=2.0.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=1, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n", - "\n", - " TorchVision: 0.15.1+cu118\n", - " OpenCV: 4.7.0\n", - " MMEngine: 0.7.3\n", - "\n", - "Runtime environment:\n", - " cudnn_benchmark: False\n", - " mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}\n", - " dist_cfg: {'backend': 'nccl'}\n", - " seed: None\n", - " Distributed launcher: none\n", - " Distributed training: False\n", - " GPU number: 1\n", - "------------------------------------------------------------\n", - "\n", - "05/15 03:33:34 - mmengine - INFO - Config:\n", - "model = dict(\n", - " type='Recognizer2D',\n", - " backbone=dict(\n", - " type='ResNet',\n", - " pretrained='https://download.pytorch.org/models/resnet50-11ad3fa6.pth',\n", - " depth=50,\n", - " norm_eval=False),\n", - " cls_head=dict(\n", - " type='TSNHead',\n", - " num_classes=2,\n", - " in_channels=2048,\n", - " spatial_type='avg',\n", - " consensus=dict(type='AvgConsensus', dim=1),\n", - " dropout_ratio=0.4,\n", - " init_std=0.01,\n", - " average_clips='prob'),\n", - " data_preprocessor=dict(\n", - " type='ActionDataPreprocessor',\n", - " mean=[123.675, 116.28, 103.53],\n", - " std=[58.395, 57.12, 57.375],\n", - " format_shape='NCHW'),\n", - " train_cfg=None,\n", - " test_cfg=None)\n", - "train_cfg = dict(\n", - " type='EpochBasedTrainLoop', max_epochs=10, val_begin=1, val_interval=1)\n", - "val_cfg = dict(type='ValLoop')\n", - "test_cfg = dict(type='TestLoop')\n", - "param_scheduler = [\n", - " dict(\n", - " type='MultiStepLR',\n", - " begin=0,\n", - " end=100,\n", - " by_epoch=True,\n", - " milestones=[40, 80],\n", - " gamma=0.1)\n", - "]\n", - "optim_wrapper = dict(\n", - " optimizer=dict(\n", - " type='SGD', lr=7.8125e-05, 
momentum=0.9, weight_decay=0.0001),\n", - " clip_grad=dict(max_norm=40, norm_type=2))\n", - "default_scope = 'mmaction'\n", - "default_hooks = dict(\n", - " runtime_info=dict(type='RuntimeInfoHook'),\n", - " timer=dict(type='IterTimerHook'),\n", - " logger=dict(type='LoggerHook', interval=20, ignore_last=False),\n", - " param_scheduler=dict(type='ParamSchedulerHook'),\n", - " checkpoint=dict(\n", - " type='CheckpointHook', interval=3, save_best='auto', max_keep_ckpts=3),\n", - " sampler_seed=dict(type='DistSamplerSeedHook'),\n", - " sync_buffers=dict(type='SyncBuffersHook'))\n", - "env_cfg = dict(\n", - " cudnn_benchmark=False,\n", - " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n", - " dist_cfg=dict(backend='nccl'))\n", - "log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True)\n", - "vis_backends = [dict(type='LocalVisBackend')]\n", - "visualizer = dict(\n", - " type='ActionVisualizer', vis_backends=[dict(type='LocalVisBackend')])\n", - "log_level = 'INFO'\n", - "load_from = './checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth'\n", - "resume = False\n", - "dataset_type = 'VideoDataset'\n", - "data_root = 'kinetics400_tiny/train/'\n", - "data_root_val = 'kinetics400_tiny/val/'\n", - "ann_file_train = 'kinetics400_tiny/kinetics_tiny_train_video.txt'\n", - "ann_file_val = 'kinetics400_tiny/kinetics_tiny_val_video.txt'\n", - "file_client_args = dict(io_backend='disk')\n", - "train_pipeline = [\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(\n", - " type='MultiScaleCrop',\n", - " input_size=224,\n", - " scales=(1, 0.875, 0.75, 0.66),\n", - " random_crop=False,\n", - " max_wh_scale_gap=1),\n", - " dict(type='Resize', scale=(224, 224), keep_ratio=False),\n", - " dict(type='Flip', flip_ratio=0.5),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - "]\n", - "val_pipeline = [\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " type='SampleFrames',\n", - " clip_len=1,\n", - " frame_interval=1,\n", - " num_clips=3,\n", - " test_mode=True),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(type='CenterCrop', crop_size=224),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - "]\n", - "test_pipeline = [\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " type='SampleFrames',\n", - " clip_len=1,\n", - " frame_interval=1,\n", - " num_clips=25,\n", - " test_mode=True),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(type='TenCrop', crop_size=224),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - "]\n", - "train_dataloader = dict(\n", - " batch_size=2,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " sampler=dict(type='DefaultSampler', shuffle=True),\n", - " dataset=dict(\n", - " type='VideoDataset',\n", - " ann_file='kinetics400_tiny/kinetics_tiny_train_video.txt',\n", - " data_prefix=dict(video='kinetics400_tiny/train/'),\n", - " pipeline=[\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " type='SampleFrames', clip_len=1, frame_interval=1,\n", - " num_clips=3),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(\n", - " 
type='MultiScaleCrop',\n", - " input_size=224,\n", - " scales=(1, 0.875, 0.75, 0.66),\n", - " random_crop=False,\n", - " max_wh_scale_gap=1),\n", - " dict(type='Resize', scale=(224, 224), keep_ratio=False),\n", - " dict(type='Flip', flip_ratio=0.5),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - " ]))\n", - "val_dataloader = dict(\n", - " batch_size=2,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " sampler=dict(type='DefaultSampler', shuffle=False),\n", - " dataset=dict(\n", - " type='VideoDataset',\n", - " ann_file='kinetics400_tiny/kinetics_tiny_val_video.txt',\n", - " data_prefix=dict(video='kinetics400_tiny/val/'),\n", - " pipeline=[\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " type='SampleFrames',\n", - " clip_len=1,\n", - " frame_interval=1,\n", - " num_clips=3,\n", - " test_mode=True),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(type='CenterCrop', crop_size=224),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - " ],\n", - " test_mode=True))\n", - "test_dataloader = dict(\n", - " batch_size=1,\n", - " num_workers=2,\n", - " persistent_workers=True,\n", - " sampler=dict(type='DefaultSampler', shuffle=False),\n", - " dataset=dict(\n", - " type='VideoDataset',\n", - " ann_file='kinetics400_tiny/kinetics_tiny_val_video.txt',\n", - " data_prefix=dict(video='kinetics400_tiny/val/'),\n", - " pipeline=[\n", - " dict(type='DecordInit', io_backend='disk'),\n", - " dict(\n", - " type='SampleFrames',\n", - " clip_len=1,\n", - " frame_interval=1,\n", - " num_clips=25,\n", - " test_mode=True),\n", - " dict(type='DecordDecode'),\n", - " dict(type='Resize', scale=(-1, 256)),\n", - " dict(type='TenCrop', crop_size=224),\n", - " dict(type='FormatShape', input_format='NCHW'),\n", - " dict(type='PackActionInputs')\n", - " ],\n", - " test_mode=True))\n", - "val_evaluator = dict(type='AccMetric')\n", - "test_evaluator = dict(type='AccMetric')\n", - "auto_scale_lr = dict(enable=False, base_batch_size=256)\n", - "work_dir = './tutorial_exps'\n", - "\n", - "05/15 03:33:35 - mmengine - INFO - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.\n", - "05/15 03:33:35 - mmengine - INFO - Hooks will be executed in the following order:\n", - "before_run:\n", - "(VERY_HIGH ) RuntimeInfoHook \n", - "(BELOW_NORMAL) LoggerHook \n", - " -------------------- \n", - "before_train:\n", - "(VERY_HIGH ) RuntimeInfoHook \n", - "(NORMAL ) IterTimerHook \n", - "(VERY_LOW ) CheckpointHook \n", - " -------------------- \n", - "before_train_epoch:\n", - "(VERY_HIGH ) RuntimeInfoHook \n", - "(NORMAL ) IterTimerHook \n", - "(NORMAL ) DistSamplerSeedHook \n", - " -------------------- \n", - "before_train_iter:\n", - "(VERY_HIGH ) RuntimeInfoHook \n", - "(NORMAL ) IterTimerHook \n", - " -------------------- \n", - "after_train_iter:\n", - "(VERY_HIGH ) RuntimeInfoHook \n", - "(NORMAL ) IterTimerHook \n", - "(BELOW_NORMAL) LoggerHook \n", - "(LOW ) ParamSchedulerHook \n", - "(VERY_LOW ) CheckpointHook \n", - " -------------------- \n", - "after_train_epoch:\n", - "(NORMAL ) IterTimerHook \n", - "(NORMAL ) SyncBuffersHook \n", - "(LOW ) ParamSchedulerHook \n", - "(VERY_LOW ) CheckpointHook \n", - " -------------------- \n", - "before_val_epoch:\n", - "(NORMAL ) IterTimerHook \n", - "(NORMAL ) SyncBuffersHook \n", - " -------------------- \n", 
- "before_val_iter:\n", - "(NORMAL ) IterTimerHook \n", - " -------------------- \n", - "after_val_iter:\n", - "(NORMAL ) IterTimerHook \n", - "(BELOW_NORMAL) LoggerHook \n", - " -------------------- \n", - "after_val_epoch:\n", - "(VERY_HIGH ) RuntimeInfoHook \n", - "(NORMAL ) IterTimerHook \n", - "(BELOW_NORMAL) LoggerHook \n", - "(LOW ) ParamSchedulerHook \n", - "(VERY_LOW ) CheckpointHook \n", - " -------------------- \n", - "after_train:\n", - "(VERY_LOW ) CheckpointHook \n", - " -------------------- \n", - "before_test_epoch:\n", - "(NORMAL ) IterTimerHook \n", - " -------------------- \n", - "before_test_iter:\n", - "(NORMAL ) IterTimerHook \n", - " -------------------- \n", - "after_test_iter:\n", - "(NORMAL ) IterTimerHook \n", - "(BELOW_NORMAL) LoggerHook \n", - " -------------------- \n", - "after_test_epoch:\n", - "(VERY_HIGH ) RuntimeInfoHook \n", - "(NORMAL ) IterTimerHook \n", - "(BELOW_NORMAL) LoggerHook \n", - " -------------------- \n", - "after_run:\n", - "(BELOW_NORMAL) LoggerHook \n", - " -------------------- \n", - "Loads checkpoint by http backend from path: https://download.pytorch.org/models/resnet50-11ad3fa6.pth\n" - ] - }, - { - "output_type": "stream", - "name": "stderr", - "text": [ - "Downloading: \"https://download.pytorch.org/models/resnet50-11ad3fa6.pth\" to /root/.cache/torch/hub/checkpoints/resnet50-11ad3fa6.pth\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "05/15 03:33:37 - mmengine - INFO - These parameters in pretrained checkpoint are not loaded: {'fc.weight', 'fc.bias'}\n", - "Loads checkpoint by local backend from path: ./checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth\n", - "The model and loaded state dict do not match exactly\n", - "\n", - "size mismatch for cls_head.fc_cls.weight: copying a param with shape torch.Size([400, 2048]) from checkpoint, the shape in current model is torch.Size([2, 2048]).\n", - "size mismatch for cls_head.fc_cls.bias: copying a param with shape torch.Size([400]) from checkpoint, the shape in current model is torch.Size([2]).\n", - "05/15 03:33:37 - mmengine - INFO - Load checkpoint from ./checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth\n", - "05/15 03:33:37 - mmengine - WARNING - \"FileClient\" will be deprecated in future. 
Please use io functions in https://mmengine.readthedocs.io/en/latest/api/fileio.html#file-io\n", - "05/15 03:33:37 - mmengine - INFO - Checkpoints will be saved to /content/mmaction2/tutorial_exps.\n", - "05/15 03:33:41 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:33:41 - mmengine - INFO - Epoch(train) [1][15/15] lr: 7.8125e-05 eta: 0:00:31 time: 0.2334 data_time: 0.0793 memory: 2917 grad_norm: 11.9900 loss: 0.6971 top1_acc: 1.0000 top5_acc: 1.0000 loss_cls: 0.6971\n", - "05/15 03:33:42 - mmengine - INFO - Epoch(val) [1][5/5] acc/top1: 0.3000 acc/top5: 1.0000 acc/mean1: 0.3000 data_time: 0.1994 time: 0.2254\n", - "05/15 03:33:42 - mmengine - INFO - The best checkpoint with 0.3000 acc/top1 at 1 epoch is saved to best_acc_top1_epoch_1.pth.\n", - "05/15 03:33:46 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:33:46 - mmengine - INFO - Epoch(train) [2][15/15] lr: 7.8125e-05 eta: 0:00:29 time: 0.2373 data_time: 0.1369 memory: 961 grad_norm: 12.4935 loss: 0.7158 top1_acc: 0.5000 top5_acc: 1.0000 loss_cls: 0.7158\n", - "05/15 03:33:48 - mmengine - INFO - Epoch(val) [2][5/5] acc/top1: 0.7000 acc/top5: 1.0000 acc/mean1: 0.7000 data_time: 0.2692 time: 0.3006\n", - "05/15 03:33:48 - mmengine - INFO - The previous best checkpoint /content/mmaction2/tutorial_exps/best_acc_top1_epoch_1.pth is removed\n", - "05/15 03:33:48 - mmengine - INFO - The best checkpoint with 0.7000 acc/top1 at 2 epoch is saved to best_acc_top1_epoch_2.pth.\n", - "05/15 03:33:51 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:33:51 - mmengine - INFO - Epoch(train) [3][15/15] lr: 7.8125e-05 eta: 0:00:24 time: 0.2112 data_time: 0.1163 memory: 961 grad_norm: 13.4063 loss: 0.7338 top1_acc: 0.0000 top5_acc: 1.0000 loss_cls: 0.7338\n", - "05/15 03:33:51 - mmengine - INFO - Saving checkpoint at 3 epochs\n", - "05/15 03:33:53 - mmengine - INFO - Epoch(val) [3][5/5] acc/top1: 0.4000 acc/top5: 1.0000 acc/mean1: 0.4000 data_time: 0.1669 time: 0.1906\n", - "05/15 03:33:56 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:33:56 - mmengine - INFO - Epoch(train) [4][15/15] lr: 7.8125e-05 eta: 0:00:19 time: 0.1750 data_time: 0.0907 memory: 961 grad_norm: 12.4322 loss: 0.6894 top1_acc: 0.0000 top5_acc: 1.0000 loss_cls: 0.6894\n", - "05/15 03:33:57 - mmengine - INFO - Epoch(val) [4][5/5] acc/top1: 0.7000 acc/top5: 1.0000 acc/mean1: 0.7000 data_time: 0.1791 time: 0.2030\n", - "05/15 03:34:00 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:34:00 - mmengine - INFO - Epoch(train) [5][15/15] lr: 7.8125e-05 eta: 0:00:16 time: 0.2016 data_time: 0.1155 memory: 961 grad_norm: 11.5982 loss: 0.6940 top1_acc: 0.0000 top5_acc: 1.0000 loss_cls: 0.6940\n", - "05/15 03:34:02 - mmengine - INFO - Epoch(val) [5][5/5] acc/top1: 0.7000 acc/top5: 1.0000 acc/mean1: 0.7000 data_time: 0.3145 time: 0.3455\n", - "05/15 03:34:05 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:34:05 - mmengine - INFO - Epoch(train) [6][15/15] lr: 7.8125e-05 eta: 0:00:13 time: 0.2366 data_time: 0.1440 memory: 961 grad_norm: 12.0952 loss: 0.6667 top1_acc: 0.0000 top5_acc: 1.0000 loss_cls: 0.6667\n", - "05/15 03:34:05 - mmengine - INFO - Saving 
checkpoint at 6 epochs\n", - "05/15 03:34:08 - mmengine - INFO - Epoch(val) [6][5/5] acc/top1: 0.6000 acc/top5: 1.0000 acc/mean1: 0.6000 data_time: 0.2172 time: 0.2403\n", - "05/15 03:34:10 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:34:10 - mmengine - INFO - Epoch(train) [7][15/15] lr: 7.8125e-05 eta: 0:00:09 time: 0.1784 data_time: 0.0942 memory: 961 grad_norm: 12.4209 loss: 0.6570 top1_acc: 1.0000 top5_acc: 1.0000 loss_cls: 0.6570\n", - "05/15 03:34:11 - mmengine - INFO - Epoch(val) [7][5/5] acc/top1: 0.9000 acc/top5: 1.0000 acc/mean1: 0.9000 data_time: 0.1898 time: 0.2118\n", - "05/15 03:34:11 - mmengine - INFO - The previous best checkpoint /content/mmaction2/tutorial_exps/best_acc_top1_epoch_2.pth is removed\n", - "05/15 03:34:12 - mmengine - INFO - The best checkpoint with 0.9000 acc/top1 at 7 epoch is saved to best_acc_top1_epoch_7.pth.\n", - "05/15 03:34:15 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:34:15 - mmengine - INFO - Epoch(train) [8][15/15] lr: 7.8125e-05 eta: 0:00:06 time: 0.2073 data_time: 0.1220 memory: 961 grad_norm: 11.4271 loss: 0.6241 top1_acc: 1.0000 top5_acc: 1.0000 loss_cls: 0.6241\n", - "05/15 03:34:17 - mmengine - INFO - Epoch(val) [8][5/5] acc/top1: 1.0000 acc/top5: 1.0000 acc/mean1: 1.0000 data_time: 0.3497 time: 0.3890\n", - "05/15 03:34:17 - mmengine - INFO - The previous best checkpoint /content/mmaction2/tutorial_exps/best_acc_top1_epoch_7.pth is removed\n", - "05/15 03:34:18 - mmengine - INFO - The best checkpoint with 1.0000 acc/top1 at 8 epoch is saved to best_acc_top1_epoch_8.pth.\n", - "05/15 03:34:21 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:34:21 - mmengine - INFO - Epoch(train) [9][15/15] lr: 7.8125e-05 eta: 0:00:03 time: 0.2309 data_time: 0.1390 memory: 961 grad_norm: 12.3066 loss: 0.6451 top1_acc: 0.5000 top5_acc: 1.0000 loss_cls: 0.6451\n", - "05/15 03:34:21 - mmengine - INFO - Saving checkpoint at 9 epochs\n", - "05/15 03:34:23 - mmengine - INFO - Epoch(val) [9][5/5] acc/top1: 1.0000 acc/top5: 1.0000 acc/mean1: 1.0000 data_time: 0.2023 time: 0.2256\n", - "05/15 03:34:26 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", - "05/15 03:34:26 - mmengine - INFO - Epoch(train) [10][15/15] lr: 7.8125e-05 eta: 0:00:00 time: 0.1733 data_time: 0.0951 memory: 961 grad_norm: 11.1461 loss: 0.5931 top1_acc: 1.0000 top5_acc: 1.0000 loss_cls: 0.5931\n", - "05/15 03:34:26 - mmengine - INFO - Saving checkpoint at 10 epochs\n", - "05/15 03:34:27 - mmengine - INFO - Epoch(val) [10][5/5] acc/top1: 1.0000 acc/top5: 1.0000 acc/mean1: 1.0000 data_time: 0.1836 time: 0.2048\n" - ] - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "Recognizer2D(\n", - " (data_preprocessor): ActionDataPreprocessor()\n", - " (backbone): ResNet(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n", - " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n", - " (layer1): Sequential(\n", - " (0): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): 
BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " (downsample): ConvModule(\n", - " (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (layer2): Sequential(\n", - " (0): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " (downsample): ConvModule(\n", - " (conv): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): 
Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (layer3): Sequential(\n", - " (0): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " (downsample): ConvModule(\n", - " (conv): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", - " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): 
Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (3): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (4): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (5): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 
1), bias=False)\n", - " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " (layer4): Sequential(\n", - " (0): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " (downsample): ConvModule(\n", - " (conv): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", - " (bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " )\n", - " (1): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " (2): Bottleneck(\n", - " (conv1): ConvModule(\n", - " (conv): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv2): ConvModule(\n", - " (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (activate): ReLU(inplace=True)\n", - " )\n", - " (conv3): ConvModule(\n", - " (conv): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", - " (bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " )\n", - " (relu): ReLU(inplace=True)\n", - " )\n", - " )\n", - " )\n", - " (cls_head): TSNHead(\n", - " (loss_cls): CrossEntropyLoss()\n", - " (consensus): AvgConsensus()\n", - " (avg_pool): AdaptiveAvgPool2d(output_size=(1, 1))\n", - " (dropout): Dropout(p=0.4, inplace=False)\n", - " (fc_cls): Linear(in_features=2048, out_features=2, bias=True)\n", - " )\n", - ")" - ] - }, - "metadata": {}, - "execution_count": 15 - } - ], - "source": [ - "import os.path as osp\n", - "import 
mmengine\n", - "from mmengine.runner import Runner\n", - "\n", - "# Create work_dir\n", - "mmengine.mkdir_or_exist(osp.abspath(cfg.work_dir))\n", - "\n", - "# build the runner from config\n", - "runner = Runner.from_cfg(cfg)\n", - "\n", - "# start training\n", - "runner.train()" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "05/15 03:33:37 - mmengine - INFO - These parameters in pretrained checkpoint are not loaded: {'fc.weight', 'fc.bias'}\n", + "Loads checkpoint by local backend from path: ./checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth\n", + "The model and loaded state dict do not match exactly\n", + "\n", + "size mismatch for cls_head.fc_cls.weight: copying a param with shape torch.Size([400, 2048]) from checkpoint, the shape in current model is torch.Size([2, 2048]).\n", + "size mismatch for cls_head.fc_cls.bias: copying a param with shape torch.Size([400]) from checkpoint, the shape in current model is torch.Size([2]).\n", + "05/15 03:33:37 - mmengine - INFO - Load checkpoint from ./checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth\n", + "05/15 03:33:37 - mmengine - WARNING - \"FileClient\" will be deprecated in future. Please use io functions in https://mmengine.readthedocs.io/en/latest/api/fileio.html#file-io\n", + "05/15 03:33:37 - mmengine - INFO - Checkpoints will be saved to /content/mmaction2/tutorial_exps.\n", + "05/15 03:33:41 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:33:41 - mmengine - INFO - Epoch(train) [1][15/15] lr: 7.8125e-05 eta: 0:00:31 time: 0.2334 data_time: 0.0793 memory: 2917 grad_norm: 11.9900 loss: 0.6971 top1_acc: 1.0000 top5_acc: 1.0000 loss_cls: 0.6971\n", + "05/15 03:33:42 - mmengine - INFO - Epoch(val) [1][5/5] acc/top1: 0.3000 acc/top5: 1.0000 acc/mean1: 0.3000 data_time: 0.1994 time: 0.2254\n", + "05/15 03:33:42 - mmengine - INFO - The best checkpoint with 0.3000 acc/top1 at 1 epoch is saved to best_acc_top1_epoch_1.pth.\n", + "05/15 03:33:46 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:33:46 - mmengine - INFO - Epoch(train) [2][15/15] lr: 7.8125e-05 eta: 0:00:29 time: 0.2373 data_time: 0.1369 memory: 961 grad_norm: 12.4935 loss: 0.7158 top1_acc: 0.5000 top5_acc: 1.0000 loss_cls: 0.7158\n", + "05/15 03:33:48 - mmengine - INFO - Epoch(val) [2][5/5] acc/top1: 0.7000 acc/top5: 1.0000 acc/mean1: 0.7000 data_time: 0.2692 time: 0.3006\n", + "05/15 03:33:48 - mmengine - INFO - The previous best checkpoint /content/mmaction2/tutorial_exps/best_acc_top1_epoch_1.pth is removed\n", + "05/15 03:33:48 - mmengine - INFO - The best checkpoint with 0.7000 acc/top1 at 2 epoch is saved to best_acc_top1_epoch_2.pth.\n", + "05/15 03:33:51 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:33:51 - mmengine - INFO - Epoch(train) [3][15/15] lr: 7.8125e-05 eta: 0:00:24 time: 0.2112 data_time: 0.1163 memory: 961 grad_norm: 13.4063 loss: 0.7338 top1_acc: 0.0000 top5_acc: 1.0000 loss_cls: 0.7338\n", + "05/15 03:33:51 - mmengine - INFO - Saving checkpoint at 3 epochs\n", + "05/15 03:33:53 - mmengine - INFO - Epoch(val) [3][5/5] acc/top1: 0.4000 acc/top5: 1.0000 acc/mean1: 0.4000 data_time: 0.1669 time: 0.1906\n", + "05/15 03:33:56 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:33:56 - mmengine - INFO - Epoch(train) 
[4][15/15] lr: 7.8125e-05 eta: 0:00:19 time: 0.1750 data_time: 0.0907 memory: 961 grad_norm: 12.4322 loss: 0.6894 top1_acc: 0.0000 top5_acc: 1.0000 loss_cls: 0.6894\n", + "05/15 03:33:57 - mmengine - INFO - Epoch(val) [4][5/5] acc/top1: 0.7000 acc/top5: 1.0000 acc/mean1: 0.7000 data_time: 0.1791 time: 0.2030\n", + "05/15 03:34:00 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:34:00 - mmengine - INFO - Epoch(train) [5][15/15] lr: 7.8125e-05 eta: 0:00:16 time: 0.2016 data_time: 0.1155 memory: 961 grad_norm: 11.5982 loss: 0.6940 top1_acc: 0.0000 top5_acc: 1.0000 loss_cls: 0.6940\n", + "05/15 03:34:02 - mmengine - INFO - Epoch(val) [5][5/5] acc/top1: 0.7000 acc/top5: 1.0000 acc/mean1: 0.7000 data_time: 0.3145 time: 0.3455\n", + "05/15 03:34:05 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:34:05 - mmengine - INFO - Epoch(train) [6][15/15] lr: 7.8125e-05 eta: 0:00:13 time: 0.2366 data_time: 0.1440 memory: 961 grad_norm: 12.0952 loss: 0.6667 top1_acc: 0.0000 top5_acc: 1.0000 loss_cls: 0.6667\n", + "05/15 03:34:05 - mmengine - INFO - Saving checkpoint at 6 epochs\n", + "05/15 03:34:08 - mmengine - INFO - Epoch(val) [6][5/5] acc/top1: 0.6000 acc/top5: 1.0000 acc/mean1: 0.6000 data_time: 0.2172 time: 0.2403\n", + "05/15 03:34:10 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:34:10 - mmengine - INFO - Epoch(train) [7][15/15] lr: 7.8125e-05 eta: 0:00:09 time: 0.1784 data_time: 0.0942 memory: 961 grad_norm: 12.4209 loss: 0.6570 top1_acc: 1.0000 top5_acc: 1.0000 loss_cls: 0.6570\n", + "05/15 03:34:11 - mmengine - INFO - Epoch(val) [7][5/5] acc/top1: 0.9000 acc/top5: 1.0000 acc/mean1: 0.9000 data_time: 0.1898 time: 0.2118\n", + "05/15 03:34:11 - mmengine - INFO - The previous best checkpoint /content/mmaction2/tutorial_exps/best_acc_top1_epoch_2.pth is removed\n", + "05/15 03:34:12 - mmengine - INFO - The best checkpoint with 0.9000 acc/top1 at 7 epoch is saved to best_acc_top1_epoch_7.pth.\n", + "05/15 03:34:15 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:34:15 - mmengine - INFO - Epoch(train) [8][15/15] lr: 7.8125e-05 eta: 0:00:06 time: 0.2073 data_time: 0.1220 memory: 961 grad_norm: 11.4271 loss: 0.6241 top1_acc: 1.0000 top5_acc: 1.0000 loss_cls: 0.6241\n", + "05/15 03:34:17 - mmengine - INFO - Epoch(val) [8][5/5] acc/top1: 1.0000 acc/top5: 1.0000 acc/mean1: 1.0000 data_time: 0.3497 time: 0.3890\n", + "05/15 03:34:17 - mmengine - INFO - The previous best checkpoint /content/mmaction2/tutorial_exps/best_acc_top1_epoch_7.pth is removed\n", + "05/15 03:34:18 - mmengine - INFO - The best checkpoint with 1.0000 acc/top1 at 8 epoch is saved to best_acc_top1_epoch_8.pth.\n", + "05/15 03:34:21 - mmengine - INFO - Exp name: tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:34:21 - mmengine - INFO - Epoch(train) [9][15/15] lr: 7.8125e-05 eta: 0:00:03 time: 0.2309 data_time: 0.1390 memory: 961 grad_norm: 12.3066 loss: 0.6451 top1_acc: 0.5000 top5_acc: 1.0000 loss_cls: 0.6451\n", + "05/15 03:34:21 - mmengine - INFO - Saving checkpoint at 9 epochs\n", + "05/15 03:34:23 - mmengine - INFO - Epoch(val) [9][5/5] acc/top1: 1.0000 acc/top5: 1.0000 acc/mean1: 1.0000 data_time: 0.2023 time: 0.2256\n", + "05/15 03:34:26 - mmengine - INFO - Exp name: 
tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20230515_033334\n", + "05/15 03:34:26 - mmengine - INFO - Epoch(train) [10][15/15] lr: 7.8125e-05 eta: 0:00:00 time: 0.1733 data_time: 0.0951 memory: 961 grad_norm: 11.1461 loss: 0.5931 top1_acc: 1.0000 top5_acc: 1.0000 loss_cls: 0.5931\n", + "05/15 03:34:26 - mmengine - INFO - Saving checkpoint at 10 epochs\n", + "05/15 03:34:27 - mmengine - INFO - Epoch(val) [10][5/5] acc/top1: 1.0000 acc/top5: 1.0000 acc/mean1: 1.0000 data_time: 0.1836 time: 0.2048\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "zdSd7oTLlxIf" - }, - "source": [ - "### Understand the log\n", - "From the log, we can have a basic understanding the training process and know how well the recognizer is trained.\n", - "\n", - "Firstly, the ResNet-50 backbone pre-trained on ImageNet is loaded, this is a common practice since training from scratch is more cost. The log shows that all the weights of the ResNet-50 backbone are loaded except the `fc.bias` and `fc.weight`.\n", - "\n", - "Second, since the dataset we are using is small, we loaded a TSN model and finetune it for action recognition.\n", - "The original TSN is trained on original Kinetics-400 dataset which contains 400 classes but Kinetics-400 Tiny dataset only have 2 classes. Therefore, the last FC layer of the pre-trained TSN for classification has different weight shape and is not used.\n", - "\n", - "Third, after training, the recognizer is evaluated by the default evaluation. The results show that the recognizer achieves 100% top1 accuracy and 100% top5 accuracy on the val dataset,\n", - " \n", - "Not bad!" + "output_type": "execute_result", + "data": { + "text/plain": [ + "Recognizer2D(\n", + " (data_preprocessor): ActionDataPreprocessor()\n", + " (backbone): ResNet(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n", + " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n", + " (layer1): Sequential(\n", + " (0): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " (downsample): ConvModule(\n", + " (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(64, 64, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (layer2): Sequential(\n", + " (0): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " (downsample): ConvModule(\n", + " (conv): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): 
BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (layer3): Sequential(\n", + " (0): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " (downsample): ConvModule(\n", + " (conv): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (3): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (4): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (5): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " (layer4): Sequential(\n", + " (0): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " (downsample): ConvModule(\n", + " (conv): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " (2): Bottleneck(\n", + " (conv1): ConvModule(\n", + " (conv): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv2): ConvModule(\n", + " (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (activate): ReLU(inplace=True)\n", + " )\n", + " (conv3): ConvModule(\n", + " (conv): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n", + " (bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (relu): ReLU(inplace=True)\n", + " )\n", + " )\n", + " )\n", + " (cls_head): TSNHead(\n", + " (loss_cls): CrossEntropyLoss()\n", + " (consensus): AvgConsensus()\n", + " (avg_pool): AdaptiveAvgPool2d(output_size=(1, 1))\n", + " (dropout): Dropout(p=0.4, inplace=False)\n", + " (fc_cls): Linear(in_features=2048, out_features=2, bias=True)\n", + " )\n", + ")" ] + }, + "metadata": {}, + "execution_count": 15 + } + ], + "source": [ + "import os.path as osp\n", + "import mmengine\n", + "from mmengine.runner import Runner\n", + "\n", + "# Create work_dir\n", + "mmengine.mkdir_or_exist(osp.abspath(cfg.work_dir))\n", + "\n", + "# build the runner from config\n", + "runner = Runner.from_cfg(cfg)\n", + "\n", + "# start training\n", + "runner.train()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zdSd7oTLlxIf" + }, + "source": [ + "### Understand the log\n", + "From the log, we can have a basic understanding the training process and know how well the recognizer is trained.\n", + "\n", + "Firstly, the ResNet-50 backbone pre-trained on ImageNet is loaded, this is a common practice since training from scratch is more cost. The log shows that all the weights of the ResNet-50 backbone are loaded except the `fc.bias` and `fc.weight`.\n", + "\n", + "Second, since the dataset we are using is small, we loaded a TSN model and finetune it for action recognition.\n", + "The original TSN is trained on original Kinetics-400 dataset which contains 400 classes but Kinetics-400 Tiny dataset only have 2 classes. 
Therefore, the last FC layer of the pre-trained TSN for classification has different weight shape and is not used.\n", + "\n", + "Third, after training, the recognizer is evaluated by the default evaluation. The results show that the recognizer achieves 100% top1 accuracy and 100% top5 accuracy on the val dataset,\n", + " \n", + "Not bad!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ryVoSfZVmogw" + }, + "source": [ + "## Test the trained recognizer\n", + "\n", + "After finetuning the recognizer, let's check the prediction results!" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "eyY3hCMwyTct", + "outputId": "34fbbdc5-b9fd-4fd2-8030-3ba56b10adbf" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "ryVoSfZVmogw" - }, - "source": [ - "## Test the trained recognizer\n", - "\n", - "After finetuning the recognizer, let's check the prediction results!" - ] + "output_type": "stream", + "name": "stdout", + "text": [ + "05/15 03:34:36 - mmengine - INFO - Epoch(test) [10/10] acc/top1: 0.9000 acc/top5: 1.0000 acc/mean1: 0.9000 data_time: 0.0586 time: 0.7817\n" + ] }, { - "cell_type": "code", - "execution_count": 16, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "eyY3hCMwyTct", - "outputId": "34fbbdc5-b9fd-4fd2-8030-3ba56b10adbf" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "05/15 03:34:36 - mmengine - INFO - Epoch(test) [10/10] acc/top1: 0.9000 acc/top5: 1.0000 acc/mean1: 0.9000 data_time: 0.0586 time: 0.7817\n" - ] - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "{'acc/top1': 0.9, 'acc/top5': 1.0, 'acc/mean1': 0.9}" - ] - }, - "metadata": {}, - "execution_count": 16 - } - ], - "source": [ - "runner.test()" + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'acc/top1': 0.9, 'acc/top5': 1.0, 'acc/mean1': 0.9}" ] + }, + "metadata": {}, + "execution_count": 16 } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "mmact_dev", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" - }, - "vscode": { - "interpreter": { - "hash": "189c342a4747645665e89db23000ac4d4edb7a87c4cd0b2f881610f468fb778d" - } - } + ], + "source": [ + "runner.test()" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "mmact_dev", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" }, - "nbformat": 4, - "nbformat_minor": 0 + "vscode": { + "interpreter": { + "hash": "189c342a4747645665e89db23000ac4d4edb7a87c4cd0b2f881610f468fb778d" + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/demo/webcam_demo.py b/demo/webcam_demo.py index cdd8585540..de87c8aa32 100644 --- a/demo/webcam_demo.py +++ b/demo/webcam_demo.py @@ -139,7 +139,7 @@ def inference(): # Forward the model with torch.no_grad(): result = 
model.test_step(cur_data)[0] - scores = result.pred_scores.item.tolist() + scores = result.pred_score.tolist() scores = np.array(scores) score_cache.append(scores) scores_sum += scores diff --git a/docs/en/get_started/guide_to_framework.md b/docs/en/get_started/guide_to_framework.md index c65d65331b..3dc1c2314b 100644 --- a/docs/en/get_started/guide_to_framework.md +++ b/docs/en/get_started/guide_to_framework.md @@ -179,7 +179,8 @@ class VideoPack(BaseTransform): def transform(self, results): packed_results = dict() inputs = to_tensor(results['imgs']) - data_sample = ActionDataSample().set_gt_labels(results['label']) + data_sample = ActionDataSample() + data_sample.set_gt_label(results['label']) metainfo = {k: results[k] for k in self.meta_keys if k in results} data_sample.set_metainfo(metainfo) packed_results['inputs'] = inputs @@ -219,7 +220,7 @@ print('num_clips: ', data_sample.num_clips) print('clip_len: ', data_sample.clip_len) # Get label of the inputs -print('label: ', data_sample.gt_labels.item) +print('label: ', data_sample.gt_label) ``` ``` @@ -321,7 +322,7 @@ print('num_clips: ', data_sample.num_clips) print('clip_len: ', data_sample.clip_len) # Get label of the inputs -print('label: ', data_sample.gt_labels.item) +print('label: ', data_sample.gt_label) from mmengine.runner import Runner @@ -481,7 +482,7 @@ class ClsHeadZelda(BaseModule): def loss(self, feats, data_samples): cls_scores = self(feats) - labels = torch.stack([x.gt_labels.item for x in data_samples]) + labels = torch.stack([x.gt_label for x in data_samples]) labels = labels.squeeze() if labels.shape == torch.Size([]): @@ -589,8 +590,8 @@ with torch.no_grad(): data_batch_test = copy.deepcopy(batched_packed_results) data = model.data_preprocessor(data_batch_test, training=False) predictions = model(**data, mode='predict') -print('Label of Sample[0]', predictions[0].gt_labels.item) -print('Scores of Sample[0]', predictions[0].pred_scores.item) +print('Label of Sample[0]', predictions[0].gt_label) +print('Scores of Sample[0]', predictions[0].pred_score) ``` ```shell @@ -661,8 +662,8 @@ class AccuracyMetric(BaseMetric): data_samples = copy.deepcopy(data_samples) for data_sample in data_samples: result = dict() - scores = data_sample['pred_scores']['item'].cpu().numpy() - label = data_sample['gt_labels']['item'].item() + scores = data_sample['pred_score'].cpu().numpy() + label = data_sample['gt_label'].item() result['scores'] = scores result['label'] = label self.results.append(result) diff --git a/docs/en/get_started/installation.md b/docs/en/get_started/installation.md index 8cc64b7798..1685f97478 100644 --- a/docs/en/get_started/installation.md +++ b/docs/en/get_started/installation.md @@ -121,7 +121,7 @@ label_file = 'tools/data/kinetics/label_map_k400.txt' model = init_recognizer(config_file, checkpoint_file, device='cpu') # or device='cuda:0' pred_result = inference_recognizer(model, video_file) -pred_scores = pred_result.pred_scores.item.tolist() +pred_scores = pred_result.pred_score.tolist() score_tuples = tuple(zip(range(len(pred_scores)), pred_scores)) score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) top5_label = score_sorted[:5] diff --git a/docs/zh_cn/get_started/guide_to_framework.md b/docs/zh_cn/get_started/guide_to_framework.md index b92c376b5d..0dc6462195 100644 --- a/docs/zh_cn/get_started/guide_to_framework.md +++ b/docs/zh_cn/get_started/guide_to_framework.md @@ -180,7 +180,7 @@ class VideoPack(BaseTransform): def transform(self, results): packed_results = dict() inputs = 
to_tensor(results['imgs']) - data_sample = ActionDataSample().set_gt_labels(results['label']) + data_sample = ActionDataSample().set_gt_label(results['label']) metainfo = {k: results[k] for k in self.meta_keys if k in results} data_sample.set_metainfo(metainfo) packed_results['inputs'] = inputs @@ -220,7 +220,7 @@ print('num_clips: ', data_sample.num_clips) print('clip_len: ', data_sample.clip_len) # ่Žทๅ–่พ“ๅ…ฅ็š„ๆ ‡็ญพ -print('label: ', data_sample.gt_labels.item) +print('label: ', data_sample.gt_label) ``` ``` @@ -322,7 +322,7 @@ print('num_clips: ', data_sample.num_clips) print('clip_len: ', data_sample.clip_len) # ่Žทๅ–่พ“ๅ…ฅ็š„ๆ ‡็ญพ -print('label: ', data_sample.gt_labels.item) +print('label: ', data_sample.gt_label) from mmengine.runner import Runner @@ -482,7 +482,7 @@ class ClsHeadZelda(BaseModule): def loss(self, feats, data_samples): cls_scores = self(feats) - labels = torch.stack([x.gt_labels.item for x in data_samples]) + labels = torch.stack([x.gt_label for x in data_samples]) labels = labels.squeeze() if labels.shape == torch.Size([]): @@ -590,8 +590,8 @@ with torch.no_grad(): data_batch_test = copy.deepcopy(batched_packed_results) data = model.data_preprocessor(data_batch_test, training=False) predictions = model(**data, mode='predict') -print('Label of Sample[0]', predictions[0].gt_labels.item) -print('Scores of Sample[0]', predictions[0].pred_scores.item) +print('Label of Sample[0]', predictions[0].gt_label) +print('Scores of Sample[0]', predictions[0].pred_score) ``` ```shell @@ -662,8 +662,8 @@ class AccuracyMetric(BaseMetric): data_samples = copy.deepcopy(data_samples) for data_sample in data_samples: result = dict() - scores = data_sample['pred_scores']['item'].cpu().numpy() - label = data_sample['gt_labels']['item'].item() + scores = data_sample['pred_score'].cpu().numpy() + label = data_sample['gt_label'].item() result['scores'] = scores result['label'] = label self.results.append(result) diff --git a/docs/zh_cn/get_started/installation.md b/docs/zh_cn/get_started/installation.md index 0e144ce6eb..091a8a5e03 100644 --- a/docs/zh_cn/get_started/installation.md +++ b/docs/zh_cn/get_started/installation.md @@ -120,7 +120,7 @@ label_file = 'tools/data/kinetics/label_map_k400.txt' model = init_recognizer(config_file, checkpoint_file, device='cpu') # or device='cuda:0' pred_result = inference_recognizer(model, video_file) -pred_scores = pred_result.pred_scores.item.tolist() +pred_scores = pred_result.pred_score.tolist() score_tuples = tuple(zip(range(len(pred_scores)), pred_scores)) score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) top5_label = score_sorted[:5] diff --git a/mmaction/apis/inference.py b/mmaction/apis/inference.py index 749395099e..4b2b4f8c4b 100644 --- a/mmaction/apis/inference.py +++ b/mmaction/apis/inference.py @@ -70,7 +70,7 @@ def inference_recognizer(model: nn.Module, Returns: :obj:`ActionDataSample`: The inference results. Specifically, the - predicted scores are saved at ``result.pred_scores.item``. + predicted scores are saved at ``result.pred_score``. """ if test_pipeline is None: @@ -131,7 +131,7 @@ def inference_skeleton(model: nn.Module, Returns: :obj:`ActionDataSample`: The inference results. Specifically, the - predicted scores are saved at ``result.pred_scores.item``. + predicted scores are saved at ``result.pred_score``. 
""" if test_pipeline is None: cfg = model.cfg diff --git a/mmaction/apis/inferencers/actionrecog_inferencer.py b/mmaction/apis/inferencers/actionrecog_inferencer.py index f45f137b59..cc6e60b0da 100644 --- a/mmaction/apis/inferencers/actionrecog_inferencer.py +++ b/mmaction/apis/inferencers/actionrecog_inferencer.py @@ -356,6 +356,6 @@ def pred2dict(self, data_sample: ActionDataSample) -> Dict: dict: The output dictionary. """ result = {} - result['pred_labels'] = data_sample.pred_labels.item.tolist() - result['pred_scores'] = data_sample.pred_scores.item.tolist() + result['pred_labels'] = data_sample.pred_label.tolist() + result['pred_scores'] = data_sample.pred_score.tolist() return result diff --git a/mmaction/datasets/transforms/formatting.py b/mmaction/datasets/transforms/formatting.py index 168509be30..fb67e10c0e 100644 --- a/mmaction/datasets/transforms/formatting.py +++ b/mmaction/datasets/transforms/formatting.py @@ -4,7 +4,7 @@ import numpy as np import torch from mmcv.transforms import BaseTransform, to_tensor -from mmengine.structures import InstanceData, LabelData +from mmengine.structures import InstanceData from mmaction.registry import TRANSFORMS from mmaction.structures import ActionDataSample @@ -12,20 +12,11 @@ @TRANSFORMS.register_module() class PackActionInputs(BaseTransform): - """Pack the input data for the recognition. - - PackActionInputs first packs one of 'imgs', 'keypoint' and 'audios' into - the `packed_results['inputs']`, which are the three basic input modalities - for the task of rgb-based, skeleton-based and audio-based action - recognition, as well as spatio-temporal action detection in the case - of 'img'. Next, it prepares a `data_sample` for the task of action - recognition (only a single label of `torch.LongTensor` format, which is - saved in the `data_sample.gt_labels.item`) or spatio-temporal action - detection respectively. Then, it saves the meta keys defined in - the `meta_keys` in `data_sample.metainfo`, and packs the `data_sample` - into the `packed_results['data_samples']`. + """Pack the inputs data. Args: + collect_keys (tuple[str], optional): The keys to be collected + to ``packed_results['inputs']``. Defaults to `` meta_keys (Sequence[str]): The meta keys to saved in the `metainfo` of the `data_sample`. Defaults to ``('img_shape', 'img_key', 'video_id', 'timestamp')``. 
@@ -95,9 +86,7 @@ def transform(self, results: Dict) -> Dict: bboxes=to_tensor(results['proposals'])) if 'label' in results: - label_data = LabelData() - label_data.item = to_tensor(results['label']) - data_sample.gt_labels = label_data + data_sample.set_gt_label(results['label']) img_meta = {k: results[k] for k in self.meta_keys if k in results} data_sample.set_metainfo(img_meta) diff --git a/mmaction/evaluation/metrics/acc_metric.py b/mmaction/evaluation/metrics/acc_metric.py index 9abc20fa6c..04985e5938 100644 --- a/mmaction/evaluation/metrics/acc_metric.py +++ b/mmaction/evaluation/metrics/acc_metric.py @@ -75,17 +75,23 @@ def process(self, data_batch: Sequence[Tuple[Any, Dict]], data_samples = copy.deepcopy(data_samples) for data_sample in data_samples: result = dict() - pred = data_sample['pred_scores'] - label = data_sample['gt_labels'] - for item_name, score in pred.items(): - pred[item_name] = score.cpu().numpy() + pred = data_sample['pred_score'] + label = data_sample['gt_label'] + + # Ad-hoc for RGBPoseConv3D + if isinstance(pred, dict): + for item_name, score in pred.items(): + pred[item_name] = score.cpu().numpy() + else: + pred = pred.cpu().numpy() + result['pred'] = pred - if label['item'].size(0) == 1: + if label.size(0) == 1: # single-label - result['label'] = label['item'].item() + result['label'] = label.item() else: # multi-label - result['label'] = label['item'].cpu().numpy() + result['label'] = label.cpu().numpy() self.results.append(result) def compute_metrics(self, results: List) -> Dict: @@ -100,39 +106,41 @@ def compute_metrics(self, results: List) -> Dict: """ labels = [x['label'] for x in results] - if len(results[0]['pred']) == 1: - preds = [x['pred']['item'] for x in results] - return self.calculate(preds, labels) - eval_results = dict() - for item_name in results[0]['pred'].keys(): - preds = [x['pred'][item_name] for x in results] - eval_result = self.calculate(preds, labels) - eval_results.update( - {f'{item_name}_{k}': v - for k, v in eval_result.items()}) - # Ad-hoc for RGBPoseConv3D - if len(results[0]['pred']) == 2 and \ - 'rgb' in results[0]['pred'] and \ - 'pose' in results[0]['pred']: - - rgb = [x['pred']['rgb'] for x in results] - pose = [x['pred']['pose'] for x in results] - - preds = { - '1:1': get_weighted_score([rgb, pose], [1, 1]), - '2:1': get_weighted_score([rgb, pose], [2, 1]), - '1:2': get_weighted_score([rgb, pose], [1, 2]) - } - for k in preds: - eval_result = self.calculate(preds[k], labels) - eval_results.update({ - f'RGBPose_{k}_{key}': v - for key, v in eval_result.items() - }) - - return eval_results + if isinstance(results[0]['pred'], dict): + + for item_name in results[0]['pred'].keys(): + preds = [x['pred'][item_name] for x in results] + eval_result = self.calculate(preds, labels) + eval_results.update( + {f'{item_name}_{k}': v + for k, v in eval_result.items()}) + + if len(results[0]['pred']) == 2 and \ + 'rgb' in results[0]['pred'] and \ + 'pose' in results[0]['pred']: + + rgb = [x['pred']['rgb'] for x in results] + pose = [x['pred']['pose'] for x in results] + + preds = { + '1:1': get_weighted_score([rgb, pose], [1, 1]), + '2:1': get_weighted_score([rgb, pose], [2, 1]), + '1:2': get_weighted_score([rgb, pose], [1, 2]) + } + for k in preds: + eval_result = self.calculate(preds[k], labels) + eval_results.update({ + f'RGBPose_{k}_{key}': v + for key, v in eval_result.items() + }) + return eval_results + + # Simple Acc Calculation + else: + preds = [x['pred'] for x in results] + return self.calculate(preds, labels) def 
calculate(self, preds: List[np.ndarray], labels: List[Union[int, np.ndarray]]) -> Dict: @@ -238,13 +246,13 @@ def __init__(self, def process(self, data_batch, data_samples: Sequence[dict]) -> None: for data_sample in data_samples: - pred_scores = data_sample.get('pred_scores') - gt_label = data_sample['gt_labels']['item'] + pred_scores = data_sample.get('pred_score') + gt_label = data_sample['gt_label'] if pred_scores is not None: - pred_label = pred_scores['item'].argmax(dim=0, keepdim=True) - self.num_classes = pred_scores['item'].size(0) + pred_label = pred_scores.argmax(dim=0, keepdim=True) + self.num_classes = pred_scores.size(0) else: - pred_label = data_sample['pred_labels']['item'] + pred_label = data_sample['pred_label'] self.results.append({ 'pred_label': pred_label, diff --git a/mmaction/models/data_preprocessors/data_preprocessor.py b/mmaction/models/data_preprocessors/data_preprocessor.py index 891cb8f386..0376318ff7 100644 --- a/mmaction/models/data_preprocessors/data_preprocessor.py +++ b/mmaction/models/data_preprocessors/data_preprocessor.py @@ -84,7 +84,7 @@ def forward(self, data = self.cast_data(data) if isinstance(data, dict): return self.forward_onesample(data, training=training) - elif isinstance(data, tuple): + elif isinstance(data, (tuple, list)): outputs = [] for data_sample in data: output = self.forward_onesample(data_sample, training=training) diff --git a/mmaction/models/heads/base.py b/mmaction/models/heads/base.py index c39da5aa9a..8febe1df5b 100644 --- a/mmaction/models/heads/base.py +++ b/mmaction/models/heads/base.py @@ -6,7 +6,6 @@ import torch.nn as nn import torch.nn.functional as F from mmengine.model import BaseModule -from mmengine.structures import LabelData from mmaction.evaluation import top_k_accuracy from mmaction.registry import MODELS @@ -112,7 +111,7 @@ def loss_by_feat(self, cls_scores: torch.Tensor, Returns: dict: A dictionary of loss components. """ - labels = [x.gt_labels.item for x in data_samples] + labels = [x.gt_label for x in data_samples] labels = torch.stack(labels).to(cls_scores.device) labels = labels.squeeze() @@ -175,7 +174,7 @@ def predict_by_feat(self, cls_scores: torch.Tensor, (B*num_segs, num_classes) data_samples (list[:obj:`ActionDataSample`]): The annotation data of every samples. It usually includes - information such as `gt_labels`. + information such as `gt_label`. Returns: List[:obj:`ActionDataSample`]: Recognition results wrapped @@ -187,10 +186,8 @@ def predict_by_feat(self, cls_scores: torch.Tensor, for data_sample, score, pred_label in zip(data_samples, cls_scores, pred_labels): - prediction = LabelData(item=score) - pred_label = LabelData(item=pred_label) - data_sample.pred_scores = prediction - data_sample.pred_labels = pred_label + data_sample.set_pred_score(score) + data_sample.set_pred_label(pred_label) return data_samples def average_clip(self, diff --git a/mmaction/models/heads/omni_head.py b/mmaction/models/heads/omni_head.py index f5084dde06..7a62cf56da 100644 --- a/mmaction/models/heads/omni_head.py +++ b/mmaction/models/heads/omni_head.py @@ -87,10 +87,7 @@ def loss_by_feat(self, cls_scores: Union[Tensor, Tuple[Tensor]], Returns: dict: A dictionary of loss components. 
""" - if hasattr(data_samples[0], 'gt_labels'): - labels = [x.gt_labels.item for x in data_samples] - else: - labels = [x.gt_label.label for x in data_samples] + labels = [x.gt_label for x in data_samples] labels = torch.stack(labels).to(cls_scores.device) labels = labels.squeeze() diff --git a/mmaction/models/heads/rgbpose_head.py b/mmaction/models/heads/rgbpose_head.py index 69da4efed9..880e37f084 100644 --- a/mmaction/models/heads/rgbpose_head.py +++ b/mmaction/models/heads/rgbpose_head.py @@ -5,7 +5,6 @@ import torch.nn as nn import torch.nn.functional as F from mmengine.model.weight_init import normal_init -from mmengine.structures import LabelData from mmaction.evaluation import top_k_accuracy from mmaction.registry import MODELS @@ -110,7 +109,7 @@ def loss_by_feat(self, cls_scores: Dict[str, torch.Tensor], Returns: dict: A dictionary of loss components. """ - labels = torch.stack([x.gt_labels.item for x in data_samples]) + labels = torch.stack([x.gt_label for x in data_samples]) labels = labels.squeeze() if labels.shape == torch.Size([]): @@ -192,34 +191,26 @@ def predict_by_feat(self, cls_scores: Dict[str, torch.Tensor], classification scores, data_samples (list[:obj:`ActionDataSample`]): The annotation data of every samples. It usually includes - information such as `gt_labels`. + information such as `gt_label`. Returns: list[:obj:`ActionDataSample`]: Recognition results wrapped by :obj:`ActionDataSample`. """ - pred_scores = [LabelData() for _ in range(len(data_samples))] - pred_labels = [LabelData() for _ in range(len(data_samples))] + pred_scores = [dict() for _ in range(len(data_samples))] for name in self.loss_components: cls_score = cls_scores[name] - cls_score, pred_label = \ - self.predict_by_scores(cls_score, data_samples) - for pred_score, pred_label, score, label in zip( - pred_scores, pred_labels, cls_score, pred_label): - pred_score.set_data({f'{name}': score}) - pred_label.set_data({f'{name}': label}) - - for data_sample, pred_score, pred_label in zip(data_samples, - pred_scores, - pred_labels): - data_sample.pred_scores = pred_score - data_sample.pred_labels = pred_label + cls_score = self.predict_by_scores(cls_score, data_samples) + for pred_score, score in zip(pred_scores, cls_score): + pred_score[f'{name}'] = score + for data_sample, pred_score, in zip(data_samples, pred_scores): + data_sample.set_pred_score(pred_score) return data_samples def predict_by_scores(self, cls_scores: torch.Tensor, - data_samples: SampleList) -> Tuple: + data_samples: SampleList) -> torch.Tensor: """Transform a batch of output features extracted from the head into prediction results. @@ -230,11 +221,9 @@ def predict_by_scores(self, cls_scores: torch.Tensor, data of every samples. Returns: - tuple: A tuple of the averaged classification scores and - prediction labels. + torch.Tensor: The averaged classification scores. 
""" num_segs = cls_scores.shape[0] // len(data_samples) cls_scores = self.average_clip(cls_scores, num_segs=num_segs) - pred_labels = cls_scores.argmax(dim=-1, keepdim=True).detach() - return cls_scores, pred_labels + return cls_scores diff --git a/mmaction/models/necks/tpn.py b/mmaction/models/necks/tpn.py index b3cdc92ff9..c04dde4123 100644 --- a/mmaction/models/necks/tpn.py +++ b/mmaction/models/necks/tpn.py @@ -254,7 +254,7 @@ def loss(self, x: torch.Tensor, data_samples: Optional[SampleList]) -> dict: """Calculate auxiliary loss.""" x = self(x) - labels = [x.gt_labels.item for x in data_samples] + labels = [x.gt_label for x in data_samples] labels = torch.stack(labels).to(x.device) labels = labels.squeeze() if labels.shape == torch.Size([]): diff --git a/mmaction/models/recognizers/base.py b/mmaction/models/recognizers/base.py index 7ce2a51b1f..ced45380cf 100644 --- a/mmaction/models/recognizers/base.py +++ b/mmaction/models/recognizers/base.py @@ -162,7 +162,7 @@ def loss(self, inputs: torch.Tensor, data_samples: SampleList, These should usually be mean centered and std scaled. data_samples (List[``ActionDataSample``]): The batch data samples. It usually includes information such - as ``gt_labels``. + as ``gt_label``. Returns: dict: A dictionary of loss components. @@ -187,7 +187,7 @@ def predict(self, inputs: torch.Tensor, data_samples: SampleList, These should usually be mean centered and std scaled. data_samples (List[``ActionDataSample``]): The batch data samples. It usually includes information such - as ``gt_labels``. + as ``gt_label``. Returns: List[``ActionDataSample``]: Return the recognition results. diff --git a/mmaction/models/utils/blending_utils.py b/mmaction/models/utils/blending_utils.py index 2d3732eeb1..855ca226b1 100644 --- a/mmaction/models/utils/blending_utils.py +++ b/mmaction/models/utils/blending_utils.py @@ -55,18 +55,18 @@ def __call__(self, imgs: torch.Tensor, batch_data_samples: SampleList, shape of (B, N, C, H, W) or (B, N, C, T, H, W). batch_data_samples (List[:obj:`ActionDataSample`]): The batch data samples. It usually includes information such - as `gt_labels`. + as `gt_label`. Returns: mixed_imgs (torch.Tensor): Blending images, float tensor with the same shape of the input imgs. batch_data_samples (List[:obj:`ActionDataSample`]): The modified - batch data samples. ``gt_labels`` in each data sample are + batch data samples. ``gt_label`` in each data sample are converted from a hard label to a blended soft label, float tensor with the shape of (num_classes, ) and all elements are in range [0, 1]. """ - label = [x.gt_labels.item for x in batch_data_samples] + label = [x.gt_label for x in batch_data_samples] # single-label classification if label[0].size(0) == 1: label = torch.tensor(label, dtype=torch.long).to(imgs.device) @@ -79,7 +79,7 @@ def __call__(self, imgs: torch.Tensor, batch_data_samples: SampleList, **kwargs) for label_item, sample in zip(mixed_label, batch_data_samples): - sample.gt_labels.item = label_item + sample.set_gt_label(label_item) return mixed_imgs, batch_data_samples diff --git a/mmaction/structures/action_data_sample.py b/mmaction/structures/action_data_sample.py index 6ea146cba2..79bec540a0 100644 --- a/mmaction/structures/action_data_sample.py +++ b/mmaction/structures/action_data_sample.py @@ -1,15 +1,16 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-from numbers import Number -from typing import Sequence, Union +from typing import Dict, Sequence, Union import numpy as np import torch -from mmengine.structures import BaseDataElement, InstanceData, LabelData +from mmengine.structures import BaseDataElement, InstanceData from mmengine.utils import is_str +LABEL_TYPE = Union[torch.Tensor, np.ndarray, Sequence, int] +SCORE_TYPE = Union[torch.Tensor, np.ndarray, Sequence, Dict] -def format_label(value: Union[torch.Tensor, np.ndarray, Sequence, - int]) -> torch.Tensor: + +def format_label(value: LABEL_TYPE) -> torch.Tensor: """Convert various python types to label-format tensor. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, @@ -19,7 +20,7 @@ def format_label(value: Union[torch.Tensor, np.ndarray, Sequence, value (torch.Tensor | numpy.ndarray | Sequence | int): Label value. Returns: - :obj:`torch.Tensor`: The foramtted label tensor. + :obj:`torch.Tensor`: The formatted label tensor. """ # Handle single number @@ -34,119 +35,62 @@ def format_label(value: Union[torch.Tensor, np.ndarray, Sequence, value = torch.LongTensor([value]) elif not isinstance(value, torch.Tensor): raise TypeError(f'Type {type(value)} is not an available label type.') - assert value.ndim == 1, \ - f'The dims of value should be 1, but got {value.ndim}.' return value -def format_score(value: Union[torch.Tensor, np.ndarray, - Sequence]) -> torch.Tensor: +def format_score(value: SCORE_TYPE) -> Union[torch.Tensor, Dict]: """Convert various python types to score-format tensor. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`. Args: - value (torch.Tensor | numpy.ndarray | Sequence): Score values. + value (torch.Tensor | numpy.ndarray | Sequence | dict): + Score values or dict of scores values. Returns: - :obj:`torch.Tensor`: The foramtted score tensor. + :obj:`torch.Tensor` | dict: The formatted scores. """ if isinstance(value, np.ndarray): value = torch.from_numpy(value).float() elif isinstance(value, Sequence) and not is_str(value): value = torch.tensor(value).float() + elif isinstance(value, dict): + for k, v in value.items(): + value[k] = format_score(v) elif not isinstance(value, torch.Tensor): raise TypeError(f'Type {type(value)} is not an available label type.') - assert value.ndim == 1, \ - f'The dims of value should be 1, but got {value.ndim}.' 
return value class ActionDataSample(BaseDataElement): - def set_gt_labels( - self, value: Union[np.ndarray, torch.Tensor, Sequence[Number], Number] - ) -> 'ActionDataSample': - """Set label of ``gt_labels``.""" - label_data = getattr(self, '_gt_label', LabelData()) - label_data.item = format_label(value) - self.gt_labels = label_data + def set_gt_label(self, value: LABEL_TYPE) -> 'ActionDataSample': + """Set `gt_label``.""" + self.set_field(format_label(value), 'gt_label', dtype=torch.Tensor) return self - def set_pred_label( - self, value: Union[np.ndarray, torch.Tensor, Sequence[Number], Number] - ) -> 'ActionDataSample': - """Set label of ``pred_label``.""" - label_data = getattr(self, '_pred_label', LabelData()) - label_data.item = format_label(value) - self.pred_labels = label_data + def set_pred_label(self, value: LABEL_TYPE) -> 'ActionDataSample': + """Set ``pred_label``.""" + self.set_field(format_label(value), 'pred_label', dtype=torch.Tensor) return self - def set_pred_score(self, value: torch.Tensor) -> 'ActionDataSample': + def set_pred_score(self, value: SCORE_TYPE) -> 'ActionDataSample': """Set score of ``pred_label``.""" - label_data = getattr(self, '_pred_label', LabelData()) - label_data.item = format_score(value) + score = format_score(value) + self.set_field(score, 'pred_score') if hasattr(self, 'num_classes'): - assert len(label_data.item) == self.num_classes, \ - f'The length of score {len(label_data.item)} should be '\ + assert len(score) == self.num_classes, \ + f'The length of score {len(score)} should be '\ f'equal to the num_classes {self.num_classes}.' else: self.set_field( - name='num_classes', - value=len(label_data.item), - field_type='metainfo') - self.pred_scores = label_data + name='num_classes', value=len(score), field_type='metainfo') return self - @property - def gt_labels(self): - """Property of `gt_labels`""" - return self._gt_labels - - @gt_labels.setter - def gt_labels(self, value): - """Setter of `gt_labels`""" - self.set_field(value, '_gt_labels', LabelData) - - @gt_labels.deleter - def gt_labels(self): - """Deleter of `gt_labels`""" - del self._gt_labels - - @property - def pred_scores(self): - """Property of `pred_scores`""" - return self._pred_scores - - @pred_scores.setter - def pred_scores(self, value): - """Setter of `pred_scores`""" - self.set_field(value, '_pred_scores', LabelData) - - @pred_scores.deleter - def pred_scores(self): - """Deleter of `pred_scores`""" - del self._pred_scores - - @property - def pred_labels(self): - """Property of `pred_labels`""" - return self._pred_labels - - @pred_labels.setter - def pred_labels(self, value): - """Setter of `pred_labels`""" - self.set_field(value, '_pred_labels', LabelData) - - @pred_labels.deleter - def pred_labels(self): - """Deleter of `pred_labels`""" - del self._pred_labels - @property def proposals(self): """Property of `proposals`""" diff --git a/mmaction/utils/gradcam_utils.py b/mmaction/utils/gradcam_utils.py index 23f124f554..3d1a7f8f47 100644 --- a/mmaction/utils/gradcam_utils.py +++ b/mmaction/utils/gradcam_utils.py @@ -94,11 +94,11 @@ def _calculate_localization_map(self, self.model.cls_head.average_clips = 'score' # model forward & backward results = self.model.test_step(data) - preds = [result.pred_scores.item for result in results] + preds = [result.pred_score for result in results] preds = torch.stack(preds) if use_labels: - labels = [result.gt_labels.item for result in results] + labels = [result.gt_label for result in results] labels = torch.stack(labels) score = 
torch.gather(preds, dim=1, index=labels) else: diff --git a/mmaction/visualization/action_visualizer.py b/mmaction/visualization/action_visualizer.py index 5924669c83..7a3bfab85e 100644 --- a/mmaction/visualization/action_visualizer.py +++ b/mmaction/visualization/action_visualizer.py @@ -63,7 +63,7 @@ class ActionVisualizer(Visualizer): >>> video = video.get_batch(range(32)).asnumpy() >>> # Example annotation >>> data_sample = ActionDataSample() - >>> data_sample.gt_labels = LabelData(item=torch.tensor([2])) + >>> data_sample.gt_label = LabelData(item=torch.tensor([2])) >>> # Setup the visualizer >>> vis = ActionVisualizer( ... save_dir="./outputs", @@ -215,8 +215,8 @@ def add_datasample(self, self.set_image(frame) if draw_gt and 'gt_labels' in data_sample: - gt_labels = data_sample.gt_labels - idx = gt_labels.item.tolist() + gt_labels = data_sample.gt_label + idx = gt_labels.tolist() class_labels = [''] * len(idx) if classes is not None: class_labels = [f' ({classes[i]})' for i in idx] diff --git a/projects/actionclip/README.md b/projects/actionclip/README.md index cfaf0e3f2b..df694fd538 100644 --- a/projects/actionclip/README.md +++ b/projects/actionclip/README.md @@ -140,7 +140,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu" model = init_recognizer(config=config, checkpoint=checkpoint_path, device=device) pred_result = inference_recognizer(model, 'test.mp4') -probs = pred_result.pred_scores.item.cpu().numpy() +probs = pred_result.pred_score.cpu().numpy() print("Label probs:", probs) # [9.995e-01 5.364e-07 6.666e-04] ``` diff --git a/tests/apis/test_inference.py b/tests/apis/test_inference.py index 1b004943f7..749c3af01b 100644 --- a/tests/apis/test_inference.py +++ b/tests/apis/test_inference.py @@ -66,7 +66,7 @@ def test_inference_recognizer(self, config, video_path, devices): result = inference_recognizer(model, video_path) self.assertIsInstance(result, ActionDataSample) - self.assertTrue(result.pred_scores.item.shape, (400, )) + self.assertTrue(result.pred_score.shape, (400, )) def test_detection_inference(self): from mmdet.apis import init_detector diff --git a/tests/datasets/transforms/test_formating.py b/tests/datasets/transforms/test_formating.py index e12a1a95d7..93e32249b5 100644 --- a/tests/datasets/transforms/test_formating.py +++ b/tests/datasets/transforms/test_formating.py @@ -34,7 +34,7 @@ def test_transform(self): self.assertIn('data_samples', results) self.assertIsInstance(results['inputs'], torch.Tensor) self.assertEqual(results['inputs'].shape, (2, 300, 17, 3)) - self.assertEqual(results['data_samples'].gt_labels.item, + self.assertEqual(results['data_samples'].gt_label, torch.LongTensor([1])) # heatmap_imgs input @@ -45,7 +45,7 @@ def test_transform(self): self.assertIn('data_samples', results) self.assertIsInstance(results['inputs'], torch.Tensor) self.assertEqual(results['inputs'].shape, (2, 17, 56, 56)) - self.assertEqual(results['data_samples'].gt_labels.item, + self.assertEqual(results['data_samples'].gt_label, torch.LongTensor([1])) # audios input @@ -82,7 +82,7 @@ def test_transform(self): self.assertIsInstance(results['inputs'], torch.Tensor) self.assertIsInstance(results['data_samples'], ActionDataSample) self.assertEqual(results['data_samples'].img_shape, (256, 256, 3)) - self.assertEqual(results['data_samples'].gt_labels.item, + self.assertEqual(results['data_samples'].gt_label, torch.LongTensor([1])) # Test grayscale image diff --git a/tests/evaluation/metrics/test_acc_metric.py b/tests/evaluation/metrics/test_acc_metric.py index 
aeb6fb2cb0..b0e966933e 100644 --- a/tests/evaluation/metrics/test_acc_metric.py +++ b/tests/evaluation/metrics/test_acc_metric.py @@ -26,8 +26,7 @@ def generate_data(num_classes=5, random_label=False, multi_label=False): label = torch.randint(num_classes, size=[1]) else: label = torch.LongTensor([scores.argmax().item()]) - data_sample = dict( - pred_scores=dict(item=scores), gt_labels=dict(item=label)) + data_sample = dict(pred_score=scores, gt_label=label) data_samples.append(data_sample) return data_batch, data_samples @@ -97,7 +96,7 @@ def test_evaluate(self): """Test using the metric in the same way as Evalutor.""" pred = [ ActionDataSample().set_pred_score(i).set_pred_label( - j).set_gt_labels(k).to_dict() for i, j, k in zip([ + j).set_gt_label(k).to_dict() for i, j, k in zip([ torch.tensor([0.7, 0.0, 0.3]), torch.tensor([0.5, 0.2, 0.3]), torch.tensor([0.4, 0.5, 0.1]), @@ -122,7 +121,7 @@ def test_evaluate(self): # Test with label for sample in pred: - del sample['pred_scores'] + del sample['pred_score'] metric = METRICS.build(dict(type='ConfusionMatrix')) metric.process(None, pred) with self.assertRaisesRegex(AssertionError, diff --git a/tests/models/data_preprocessors/test_data_preprocessor.py b/tests/models/data_preprocessors/test_data_preprocessor.py index 5fe3e8f663..9591305691 100644 --- a/tests/models/data_preprocessors/test_data_preprocessor.py +++ b/tests/models/data_preprocessors/test_data_preprocessor.py @@ -15,7 +15,7 @@ def generate_dummy_data(batch_size, input_shape): 'inputs': [torch.randint(0, 255, input_shape) for _ in range(batch_size)], 'data_samples': - [ActionDataSample().set_gt_labels(2) for _ in range(batch_size)] + [ActionDataSample().set_gt_label(2) for _ in range(batch_size)] } return data @@ -53,8 +53,8 @@ def test_data_preprocessor(): format_shape='NCTHW', blending=dict(type='MixupBlending', num_classes=5)) data = psr(deepcopy(raw_data), training=True) - assert data['data_samples'][0].gt_labels.item.shape == (5, ) - assert data['data_samples'][1].gt_labels.item.shape == (5, ) + assert data['data_samples'][0].gt_label.shape == (5, ) + assert data['data_samples'][1].gt_label.shape == (5, ) raw_data = generate_dummy_data(2, (1, 3, 224, 224)) psr = ActionDataPreprocessor( diff --git a/tests/models/data_preprocessors/test_multimodal_data_preprocessor.py b/tests/models/data_preprocessors/test_multimodal_data_preprocessor.py index 35483bd5d9..671d2c1c96 100644 --- a/tests/models/data_preprocessors/test_multimodal_data_preprocessor.py +++ b/tests/models/data_preprocessors/test_multimodal_data_preprocessor.py @@ -13,7 +13,7 @@ def generate_dummy_data(batch_size, input_keys, input_shapes): data = dict() data['data_samples'] = [ - ActionDataSample().set_gt_labels(2) for _ in range(batch_size) + ActionDataSample().set_gt_label(2) for _ in range(batch_size) ] data['inputs'] = dict() for key, shape in zip(input_keys, input_shapes): diff --git a/tests/models/heads/test_feature_head.py b/tests/models/heads/test_feature_head.py index 932ed87133..424016bc8d 100644 --- a/tests/models/heads/test_feature_head.py +++ b/tests/models/heads/test_feature_head.py @@ -27,7 +27,7 @@ def test_2d_recognizer(self): input_shape = [3, 3, 32, 32] data_batch = { 'inputs': [torch.randint(0, 256, input_shape)], - 'data_samples': [ActionDataSample().set_gt_labels(2)] + 'data_samples': [ActionDataSample().set_gt_label(2)] } feat = recognizer.test_step(data_batch) assert isinstance(feat, torch.Tensor) @@ -46,7 +46,7 @@ def test_3d_recognizer(self): input_shape = [1, 3, 4, 32, 32] data_batch = { 
'inputs': [torch.randint(0, 256, input_shape)], - 'data_samples': [ActionDataSample().set_gt_labels(2)] + 'data_samples': [ActionDataSample().set_gt_label(2)] } feat = recognizer.test_step(data_batch) assert isinstance(feat, torch.Tensor) diff --git a/tests/models/heads/test_omni_head.py b/tests/models/heads/test_omni_head.py index f9181893af..2724830353 100644 --- a/tests/models/heads/test_omni_head.py +++ b/tests/models/heads/test_omni_head.py @@ -31,9 +31,7 @@ def testOmniHead(): video_feat = torch.randn(2, 400, 8, 8, 8) video_score = head(video_feat) assert video_score.shape == torch.Size([2, 200]) - data_samples = [ - obj('gt_label', obj('label', torch.tensor(1))) for _ in range(2) - ] + data_samples = [obj('gt_label', torch.tensor(1)) for _ in range(2)] losses = head.loss_by_feat(video_score, data_samples) assert 'loss_cls' in losses @@ -41,6 +39,6 @@ def testOmniHead(): head.eval() image_score = head(image_feat) assert image_score.shape == torch.Size([1, 100]) - data_samples = [obj('gt_labels', obj('item', torch.tensor(1)))] + data_samples = [obj('gt_label', torch.tensor(1))] losses = head.loss_by_feat(image_score, data_samples) assert 'loss_cls' in losses diff --git a/tests/models/necks/test_tpn.py b/tests/models/necks/test_tpn.py index 1e9387aa39..08cc17dedc 100644 --- a/tests/models/necks/test_tpn.py +++ b/tests/models/necks/test_tpn.py @@ -3,7 +3,6 @@ import pytest import torch -from mmengine.structures import LabelData from mmaction.models import TPN from mmaction.structures import ActionDataSample @@ -14,7 +13,7 @@ def get_label(label_): label = [] for idx, one_label in enumerate(label_): data_sample = ActionDataSample() - data_sample.gt_labels = LabelData(item=label_[idx]) + data_sample.set_gt_label(label_[idx]) label.append(data_sample) return label diff --git a/tests/models/recognizers/recognizer_omni.py b/tests/models/recognizers/recognizer_omni.py index 23c58748de..e06cd5c03f 100644 --- a/tests/models/recognizers/recognizer_omni.py +++ b/tests/models/recognizers/recognizer_omni.py @@ -12,8 +12,7 @@ def test_omni_resnet(): register_all_modules() config = get_recognizer_cfg( - 'omnisource/slowonly_r50_16xb16-8x8x1-256e_imagenet-kinetics400-rgb.py' - ) + 'omnisource/slowonly_r50_8xb16-8x8x1-256e_imagenet-kinetics400-rgb.py') recognizer = MODELS.build(config.model) # test train_step @@ -24,8 +23,8 @@ def test_omni_resnet(): torch.randint(0, 255, (1, 3, 8, 224, 224)) ], 'data_samples': [ - ActionDataSample().set_gt_labels(2), - ActionDataSample().set_gt_labels(2) + ActionDataSample().set_gt_label(2), + ActionDataSample().set_gt_label(2) ] } @@ -35,8 +34,8 @@ def test_omni_resnet(): torch.randint(0, 255, (1, 3, 224, 224)) ], 'data_samples': [ - ActionDataSample().set_gt_labels(2), - ActionDataSample().set_gt_labels(2) + ActionDataSample().set_gt_label(2), + ActionDataSample().set_gt_label(2) ] } @@ -54,7 +53,7 @@ def test_omni_resnet(): # test test_step with torch.no_grad(): predictions = recognizer.test_step(video_sample) - score = predictions[0].pred_scores.item - assert len(predictions) == 1 + score = predictions[0].pred_score + assert len(predictions) == 2 assert torch.min(score) >= 0 assert torch.max(score) <= 1 diff --git a/tests/models/recognizers/test_recognizer2d.py b/tests/models/recognizers/test_recognizer2d.py index b40398755b..3a13b0ef37 100644 --- a/tests/models/recognizers/test_recognizer2d.py +++ b/tests/models/recognizers/test_recognizer2d.py @@ -21,7 +21,7 @@ def train_test_step(cfg, input_shape): 'inputs': [torch.randint(0, 256, input_shape) for i in 
range(batch_size)], 'data_samples': - [ActionDataSample().set_gt_labels(2) for i in range(batch_size)] + [ActionDataSample().set_gt_label(2) for i in range(batch_size)] } # test train_step @@ -34,7 +34,7 @@ def train_test_step(cfg, input_shape): # test test_step with torch.no_grad(): predictions = recognizer.test_step(data_batch) - score = predictions[0].pred_scores.item + score = predictions[0].pred_score assert len(predictions) == batch_size assert score.shape == torch.Size([num_classes]) assert torch.min(score) >= 0 @@ -46,7 +46,7 @@ def train_test_step(cfg, input_shape): data_batch['inputs'] = [torch.randint(0, 256, input_shape)] with torch.no_grad(): predictions = recognizer.test_step(data_batch) - score = predictions[0].pred_scores.item + score = predictions[0].pred_score assert len(predictions) == batch_size assert score.shape == torch.Size([num_classes]) diff --git a/tests/models/recognizers/test_recognizer3d.py b/tests/models/recognizers/test_recognizer3d.py index 7d80de00fb..c9f73d1a10 100644 --- a/tests/models/recognizers/test_recognizer3d.py +++ b/tests/models/recognizers/test_recognizer3d.py @@ -14,7 +14,7 @@ def train_test_step(cfg, input_shape): num_classes = cfg.model.cls_head.num_classes data_batch = { 'inputs': [torch.randint(0, 256, input_shape)], - 'data_samples': [ActionDataSample().set_gt_labels(2)] + 'data_samples': [ActionDataSample().set_gt_label(2)] } # test train_step @@ -27,7 +27,7 @@ def train_test_step(cfg, input_shape): # test test_step with torch.no_grad(): predictions = recognizer.test_step(data_batch) - score = predictions[0].pred_scores.item + score = predictions[0].pred_score assert len(predictions) == 1 assert score.shape == torch.Size([num_classes]) assert torch.min(score) >= 0 @@ -40,7 +40,7 @@ def train_test_step(cfg, input_shape): data_batch['inputs'] = [torch.randint(0, 256, input_shape)] with torch.no_grad(): predictions = recognizer.test_step(data_batch) - score = predictions[0].pred_scores.item + score = predictions[0].pred_score assert len(predictions) == 1 assert score.shape == torch.Size([num_views, num_classes]) diff --git a/tests/models/recognizers/test_recognizer_gcn.py b/tests/models/recognizers/test_recognizer_gcn.py index 7ae1441a6b..723c77d595 100644 --- a/tests/models/recognizers/test_recognizer_gcn.py +++ b/tests/models/recognizers/test_recognizer_gcn.py @@ -14,7 +14,7 @@ def train_test_step(cfg, input_shape): num_classes = cfg.model.cls_head.num_classes data_batch = { 'inputs': [torch.randn(input_shape)], - 'data_samples': [ActionDataSample().set_gt_labels(2)] + 'data_samples': [ActionDataSample().set_gt_label(2)] } # test train_step @@ -27,7 +27,7 @@ def train_test_step(cfg, input_shape): # test test_step with torch.no_grad(): predictions = recognizer.test_step(data_batch) - score = predictions[0].pred_scores.item + score = predictions[0].pred_score assert len(predictions) == 1 assert score.shape == torch.Size([num_classes]) assert torch.min(score) >= 0 @@ -40,7 +40,7 @@ def train_test_step(cfg, input_shape): data_batch['inputs'] = [torch.randn(input_shape)] with torch.no_grad(): predictions = recognizer.test_step(data_batch) - score = predictions[0].pred_scores.item + score = predictions[0].pred_score assert len(predictions) == 1 assert score.shape == torch.Size([num_clips, num_classes]) diff --git a/tests/models/utils/test_blending_utils.py b/tests/models/utils/test_blending_utils.py index 993b331093..e2eba9de47 100644 --- a/tests/models/utils/test_blending_utils.py +++ b/tests/models/utils/test_blending_utils.py @@ -4,7 +4,6 @@ 
import torch import torch.nn.functional as F from mmcv.transforms import to_tensor -from mmengine.structures import LabelData from mmaction.models import CutmixBlending, MixupBlending, RandomBatchAugment from mmaction.structures import ActionDataSample @@ -14,7 +13,7 @@ def get_label(label_): label = [] for idx, one_label in enumerate(label_): data_sample = ActionDataSample() - data_sample.gt_labels = LabelData(item=label_[idx]) + data_sample.set_gt_label(label_[idx]) label.append(data_sample) return label diff --git a/tests/models/utils/test_gradcam.py b/tests/models/utils/test_gradcam.py index e9568531c5..3982907bcb 100644 --- a/tests/models/utils/test_gradcam.py +++ b/tests/models/utils/test_gradcam.py @@ -41,7 +41,7 @@ def _do_test_2D_models(recognizer, device='cpu'): demo_data = { 'inputs': [torch.randint(0, 256, input_shape[1:])], - 'data_samples': [ActionDataSample().set_gt_labels(2)] + 'data_samples': [ActionDataSample().set_gt_label(2)] } recognizer = recognizer.to(device) @@ -67,7 +67,7 @@ def _do_test_3D_models(recognizer, input_shape, num_classes=num_classes, model_type='3D') demo_data = { 'inputs': [torch.randint(0, 256, input_shape[1:])], - 'data_samples': [ActionDataSample().set_gt_labels(2)] + 'data_samples': [ActionDataSample().set_gt_label(2)] } gradcam = GradCAM(recognizer, target_layer_name) diff --git a/tests/visualization/test_action_visualizer.py b/tests/visualization/test_action_visualizer.py index c86b324af9..298b59a842 100644 --- a/tests/visualization/test_action_visualizer.py +++ b/tests/visualization/test_action_visualizer.py @@ -3,8 +3,6 @@ import decord import pytest -import torch -from mmengine.structures import LabelData from mmaction.structures import ActionDataSample from mmaction.visualization import ActionVisualizer @@ -16,7 +14,7 @@ def test_visualizer(): video = video.get_batch(range(32)).asnumpy() data_sample = ActionDataSample() - data_sample.gt_labels = LabelData(item=torch.tensor([2])) + data_sample.set_gt_label(2) vis = ActionVisualizer() vis.add_datasample('demo', video) diff --git a/tests/visualization/test_video_backend.py b/tests/visualization/test_video_backend.py index c5153d812d..591646eb7a 100644 --- a/tests/visualization/test_video_backend.py +++ b/tests/visualization/test_video_backend.py @@ -8,8 +8,6 @@ import decord import pytest -import torch -from mmengine.structures import LabelData from mmaction.structures import ActionDataSample from mmaction.utils import register_all_modules @@ -24,7 +22,7 @@ def test_local_visbackend(): video = video.get_batch(range(32)).asnumpy() data_sample = ActionDataSample() - data_sample.gt_labels = LabelData(item=torch.tensor([2])) + data_sample.set_gt_label(2) with TemporaryDirectory() as tmp_dir: vis = ActionVisualizer( save_dir=tmp_dir, vis_backends=[dict(type='LocalVisBackend')]) @@ -46,7 +44,7 @@ def test_tensorboard_visbackend(): video = video.get_batch(range(32)).asnumpy() data_sample = ActionDataSample() - data_sample.gt_labels = LabelData(item=torch.tensor([2])) + data_sample.set_gt_label(2) with TemporaryDirectory() as tmp_dir: vis = ActionVisualizer( save_dir=tmp_dir, @@ -63,29 +61,3 @@ def test_tensorboard_visbackend(): # wait tensorboard store asynchronously time.sleep(1) return - - -""" -def test_wandb_visbackend(): - video = decord.VideoReader('./demo/demo.mp4') - video = video.get_batch(range(32)).asnumpy() - - data_sample = ActionDataSample() - data_sample.gt_labels = LabelData(item=torch.tensor([2])) - - vis = ActionVisualizer( - save_dir='./outputs', 
vis_backends=[dict(type='WandbVisBackend')]) - vis.add_datasample('demo', video, data_sample, step=1) - - wandb_dir = 'outputs/vis_data/wandb/' - assert Path(wandb_dir).exists() - - flag = False - for item in os.listdir(wandb_dir): - if item.startswith('run-') and os.path.isdir('%s/%s' % - (wandb_dir, item)): - flag = True - break - assert flag, 'Cannot find wandb folder!' - return -""" diff --git a/tools/analysis_tools/report_accuracy.py b/tools/analysis_tools/report_accuracy.py index c361f644de..d5c529dfe1 100644 --- a/tools/analysis_tools/report_accuracy.py +++ b/tools/analysis_tools/report_accuracy.py @@ -39,20 +39,13 @@ def main(): data_sample_list = [load(f) for f in args.preds] score_list = [] for data_samples in data_sample_list: - scores = [ - sample['pred_scores']['item'].numpy() for sample in data_samples - ] + scores = [sample['pred_score'].numpy() for sample in data_samples] score_list.append(scores) if args.multi_label: - labels = [ - sample['gt_labels']['item'] for sample in data_sample_list[0] - ] + labels = [sample['gt_label'] for sample in data_sample_list[0]] else: - labels = [ - sample['gt_labels']['item'].item() - for sample in data_sample_list[0] - ] + labels = [sample['gt_label'].item() for sample in data_sample_list[0]] if args.apply_softmax: diff --git a/tools/deployment/export_onnx_gcn.py b/tools/deployment/export_onnx_gcn.py index a4fd237a59..b9cb8423a6 100644 --- a/tools/deployment/export_onnx_gcn.py +++ b/tools/deployment/export_onnx_gcn.py @@ -122,7 +122,7 @@ def main(): base_output = base_model( input_tensor.unsqueeze(0), data_samples=[data_sample], mode='predict')[0] - base_output = base_output.pred_scores.item.detach().cpu().numpy() + base_output = base_output.pred_score.detach().cpu().numpy() model = GCNNet(base_model).to(args.device) model.eval() diff --git a/tools/deployment/export_onnx_posec3d.py b/tools/deployment/export_onnx_posec3d.py index 014096b48e..f8950dd8c8 100644 --- a/tools/deployment/export_onnx_posec3d.py +++ b/tools/deployment/export_onnx_posec3d.py @@ -118,7 +118,7 @@ def main(): base_output = base_model( input_tensor.unsqueeze(0), data_samples=[data_sample], mode='predict')[0] - base_output = base_output.pred_scores.item.detach().cpu().numpy() + base_output = base_output.pred_score.detach().cpu().numpy() model = GCNNet(base_model).to(args.device) model.eval() From ed1270cbd8d69706355fb3953a97e6ce11cff3f0 Mon Sep 17 00:00:00 2001 From: wxDai Date: Wed, 6 Sep 2023 21:23:19 +0800 Subject: [PATCH 16/24] [Feature] Support UMT (#2657) --- projects/umt/README.md | 93 +++++ ...6-res224_kinetics710-pre-ft_u8_k400-rgb.py | 82 +++++ ...6-res224_kinetics710-pre-ft_u8_k700-rgb.py | 82 +++++ ...6-res224_kinetics710-pre-ft_u8_k400-rgb.py | 82 +++++ ...6-res224_kinetics710-pre-ft_u8_k700-rgb.py | 82 +++++ projects/umt/models/__init__.py | 3 + projects/umt/models/vit.py | 344 ++++++++++++++++++ 7 files changed, 768 insertions(+) create mode 100644 projects/umt/README.md create mode 100644 projects/umt/configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py create mode 100644 projects/umt/configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py create mode 100644 projects/umt/configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py create mode 100644 projects/umt/configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py create mode 100644 projects/umt/models/__init__.py create mode 100644 projects/umt/models/vit.py diff --git a/projects/umt/README.md b/projects/umt/README.md new file mode 100644 index 0000000000..1d2db487fd --- 
/dev/null +++ b/projects/umt/README.md @@ -0,0 +1,93 @@ +# UMT Project + +[Unmasked Teacher: Towards Training-Efficient Video Foundation Models](https://arxiv.org/abs/2303.16058) + + + +## Abstract + + + +Video Foundation Models (VFMs) have received limited exploration due to high computational costs and data scarcity. Previous VFMs rely on Image Foundation Models (IFMs), which face challenges in transferring to the video domain. Although VideoMAE has trained a robust ViT from limited data, its low-level reconstruction poses convergence difficulties and conflicts with high-level cross-modal alignment. This paper proposes a training-efficient method for temporal-sensitive VFMs that integrates the benefits of existing methods. To increase data efficiency, we mask out most of the low-semantics video tokens, but selectively align the unmasked tokens with IFM, which serves as the UnMasked Teacher (UMT). By providing semantic guidance, our method enables faster convergence and multimodal friendliness. With a progressive pre-training framework, our model can handle various tasks including scene-related, temporal-related, and complex video-language understanding. Using only public sources for pre-training in 6 days on 32 A100 GPUs, our scratch-built ViT-L/16 achieves state-of-the-art performances on various video tasks. + + + +
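The alignment idea described above is easy to state in code. The following is only an illustrative sketch of an unmasked-teacher style loss; the tensor shapes, masking scheme, and loss form are assumptions for exposition, not the training code of this PR, which adds only inference configs and the `UMTViT` backbone.

```python
import torch.nn.functional as F


def unmasked_teacher_loss(student_tokens, teacher_tokens, keep_mask):
    """Align only the unmasked student tokens with a frozen image teacher.

    student_tokens: (B, N, C) features from the video student for all tokens
    teacher_tokens: (B, N, C) features from the frozen image foundation model
    keep_mask:      (B, N) boolean, True where a token is kept (unmasked)
    """
    s = F.normalize(student_tokens[keep_mask], dim=-1)          # (K, C)
    t = F.normalize(teacher_tokens[keep_mask].detach(), dim=-1)  # teacher gives targets only
    return (1.0 - (s * t).sum(dim=-1)).mean()                    # cosine-distance alignment
```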
+ +## Usage + +### Setup Environment + +Please refer to [Installation](https://mmaction2.readthedocs.io/en/latest/get_started/installation.html) to install MMAction2. + +Assume that you are located at `$MMACTION2/projects/umt`. + +Add the current folder to `PYTHONPATH`, so that Python can find your code. Run the following command in the current directory to add it. + +> Please run it every time after you opened a new shell. + +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` + +### Data Preparation + +Prepare the Kinetics dataset according to the [instruction](https://github.com/open-mmlab/mmaction2/tree/main/tools/data/kinetics#readme). + +Create a symbolic link from `$MMACTION2/data` to `./data` in the current directory, so that Python can locate your data. Run the following command in the current directory to create the symbolic link. + +```shell +ln -s ../../data ./data +``` + +### Testing commands + +**To test with single GPU:** + +```bash +mim test mmaction configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py --checkpoint $CHECKPOINT +``` + +**To test with multiple GPUs:** + +```bash +mim test mmaction configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py --checkpoint $CHECKPOINT --launcher pytorch --gpus 8 +``` + +**To test with multiple GPUs by slurm:** + +```bash +mim test mmaction configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py --checkpoint $CHECKPOINT --launcher slurm \ + --gpus 8 --gpus-per-node 8 --partition $PARTITION +``` + +## Results + +### Kinetics400 + +| frame sampling strategy | resolution | backbone | pretrain | top1 acc | testing protocol | config | ckpt | +| :---------------------: | :--------: | :------: | :---------: | :------: | :--------------: | :-------------------------------------------------------------: | :-----------------------------------------------------------: | +| uniform 8 | 224x224 | UMT-B | Kinetics710 | 87.33 | 4 clips x 3 crop | [config](./configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/projects/umt/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb.pth) | +| uniform 8 | 224x224 | UMT-L | Kinetics710 | 90.21 | 4 clips x 3 crop | [config](./configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/projects/umt/umt-large-p16-res224_kinetics710-pre-ft_u8_k400-rgb/umt-large-p16-res224_kinetics710-pre-ft_u8_k400-rgb.pth) | + +### Kinetics700 + +| frame sampling strategy | resolution | backbone | pretrain | top1 acc | testing protocol | config | ckpt | +| :---------------------: | :--------: | :------: | :---------: | :------: | :--------------: | :-------------------------------------------------------------: | :-----------------------------------------------------------: | +| uniform 8 | 224x224 | UMT-B | Kinetics710 | 77.95 | 4 clips x 3 crop | [config](./configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/projects/umt/umt-base-p16-res224_kinetics710-pre-ft_u8_k700-rgb/umt-base-p16-res224_kinetics710-pre-ft_u8_k700-rgb.pth) | +| uniform 8 | 224x224 | UMT-L | Kinetics710 | 82.79 | 4 clips x 3 crop | [config](./configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/projects/umt/umt-large-p16-res224_kinetics710-pre-ft_u8_k700-rgb/umt-large-p16-res224_kinetics710-pre-ft_u8_k700-rgb.pth) | + +## Citation + + + 
+```bibtex +@article{li2023unmasked, + title={Unmasked teacher: Towards training-efficient video foundation models}, + author={Li, Kunchang and Wang, Yali and Li, Yizhuo and Wang, Yi and He, Yinan and Wang, Limin and Qiao, Yu}, + journal={arXiv preprint arXiv:2303.16058}, + year={2023} +} +``` diff --git a/projects/umt/configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py b/projects/umt/configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py new file mode 100644 index 0000000000..e4077abcb4 --- /dev/null +++ b/projects/umt/configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py @@ -0,0 +1,82 @@ +custom_imports = dict(imports='models') + +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='UMTViT', + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + all_frames=8, + qkv_bias=True), + cls_head=dict( + type='TimeSformerHead', + num_classes=400, + in_channels=768, + average_clips='prob'), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + format_shape='NCTHW')) + +# dataset settings +dataset_type = 'VideoDataset' +data_root_val = 'data/kinetics400/videos_val' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') + +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=8, num_clips=4, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +test_dataloader = dict( + batch_size=8, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +test_evaluator = dict(type='AccMetric') +test_cfg = dict(type='TestLoop') + +default_scope = 'mmaction' + +default_hooks = dict( + runtime_info=dict(type='RuntimeInfoHook'), + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=20, ignore_last=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', interval=1, save_best='auto', max_keep_ckpts=5), + sampler_seed=dict(type='DistSamplerSeedHook'), + sync_buffers=dict(type='SyncBuffersHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) + +log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict(type='ActionVisualizer', vis_backends=vis_backends) + +log_level = 'INFO' +load_from = None +resume = False diff --git a/projects/umt/configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py b/projects/umt/configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py new file mode 100644 index 0000000000..29bf3f002d --- /dev/null +++ b/projects/umt/configs/umt-base-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py @@ -0,0 +1,82 @@ +custom_imports = dict(imports='models') + +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='UMTViT', + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + all_frames=8, + qkv_bias=True), + cls_head=dict( + type='TimeSformerHead', + num_classes=700, + in_channels=768, + average_clips='prob'), + 
data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + format_shape='NCTHW')) + +# dataset settings +dataset_type = 'VideoDataset' +data_root_val = 'data/kinetics700/videos_val' +ann_file_test = 'data/kinetics700/kinetics700_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') + +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=8, num_clips=4, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +test_dataloader = dict( + batch_size=8, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +test_evaluator = dict(type='AccMetric') +test_cfg = dict(type='TestLoop') + +default_scope = 'mmaction' + +default_hooks = dict( + runtime_info=dict(type='RuntimeInfoHook'), + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=20, ignore_last=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', interval=1, save_best='auto', max_keep_ckpts=5), + sampler_seed=dict(type='DistSamplerSeedHook'), + sync_buffers=dict(type='SyncBuffersHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) + +log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict(type='ActionVisualizer', vis_backends=vis_backends) + +log_level = 'INFO' +load_from = None +resume = False diff --git a/projects/umt/configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py b/projects/umt/configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py new file mode 100644 index 0000000000..e243744e8a --- /dev/null +++ b/projects/umt/configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k400-rgb.py @@ -0,0 +1,82 @@ +custom_imports = dict(imports='models') + +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='UMTViT', + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + all_frames=8, + qkv_bias=True), + cls_head=dict( + type='TimeSformerHead', + num_classes=400, + in_channels=1024, + average_clips='prob'), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + format_shape='NCTHW')) + +# dataset settings +dataset_type = 'VideoDataset' +data_root_val = 'data/kinetics400/videos_val' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') + +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=8, num_clips=4, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +test_dataloader = dict( + batch_size=8, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + 
test_mode=True)) + +test_evaluator = dict(type='AccMetric') +test_cfg = dict(type='TestLoop') + +default_scope = 'mmaction' + +default_hooks = dict( + runtime_info=dict(type='RuntimeInfoHook'), + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=20, ignore_last=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', interval=1, save_best='auto', max_keep_ckpts=5), + sampler_seed=dict(type='DistSamplerSeedHook'), + sync_buffers=dict(type='SyncBuffersHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) + +log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict(type='ActionVisualizer', vis_backends=vis_backends) + +log_level = 'INFO' +load_from = None +resume = False diff --git a/projects/umt/configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py b/projects/umt/configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py new file mode 100644 index 0000000000..e75747b946 --- /dev/null +++ b/projects/umt/configs/umt-large-p16-res224_kinetics710-pre-ft_u8_k700-rgb.py @@ -0,0 +1,82 @@ +custom_imports = dict(imports='models') + +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='UMTViT', + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + all_frames=8, + qkv_bias=True), + cls_head=dict( + type='TimeSformerHead', + num_classes=700, + in_channels=1024, + average_clips='prob'), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + format_shape='NCTHW')) + +# dataset settings +dataset_type = 'VideoDataset' +data_root_val = 'data/kinetics700/videos_val' +ann_file_test = 'data/kinetics700/kinetics700_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') + +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=8, num_clips=4, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +test_dataloader = dict( + batch_size=8, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +test_evaluator = dict(type='AccMetric') +test_cfg = dict(type='TestLoop') + +default_scope = 'mmaction' + +default_hooks = dict( + runtime_info=dict(type='RuntimeInfoHook'), + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=20, ignore_last=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', interval=1, save_best='auto', max_keep_ckpts=5), + sampler_seed=dict(type='DistSamplerSeedHook'), + sync_buffers=dict(type='SyncBuffersHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) + +log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict(type='ActionVisualizer', vis_backends=vis_backends) + +log_level = 'INFO' +load_from = None +resume = False diff --git a/projects/umt/models/__init__.py 
b/projects/umt/models/__init__.py new file mode 100644 index 0000000000..5c0c77a862 --- /dev/null +++ b/projects/umt/models/__init__.py @@ -0,0 +1,3 @@ +from .vit import UMTViT + +__all__ = ['UMTViT'] diff --git a/projects/umt/models/vit.py b/projects/umt/models/vit.py new file mode 100644 index 0000000000..00cebb128e --- /dev/null +++ b/projects/umt/models/vit.py @@ -0,0 +1,344 @@ +from functools import partial + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from mmcv.cnn.bricks import DropPath +from mmengine import to_2tuple + +from mmaction.registry import MODELS + + +class Mlp(nn.Module): + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the original BERT implement + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + attn_head_dim=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat( + (self.q_bias, + torch.zeros_like(self.v_bias, + requires_grad=False), self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + init_values=None, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + attn_head_dim=attn_head_dim) + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + if init_values > 0: + self.gamma_1 = nn.Parameter( + init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter( + init_values * torch.ones((dim)), requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + num_frames=16, + tubelet_size=2): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.tubelet_size = int(tubelet_size) + num_patches = (img_size[1] // + patch_size[1]) * (img_size[0] // patch_size[0]) * ( + num_frames // self.tubelet_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + self.proj = nn.Conv3d( + in_channels=in_chans, + out_channels=embed_dim, + kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]), + stride=(self.tubelet_size, patch_size[0], patch_size[1])) + + def forward(self, x): + B, C, T, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model " \ + f'({self.img_size[0]}*{self.img_size[1]}).' + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +# sin-cos position encoding +def get_sinusoid_encoding_table(n_position, + d_hid, + cur_frame=-1, + pre_n_position=1568): + """Sinusoid position encoding table.""" + + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(pre_n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + sinusoid_table = torch.tensor( + sinusoid_table, dtype=torch.float, requires_grad=False).unsqueeze(0) + print(f'n_position: {n_position}') + print(f'pre_n_position: {pre_n_position}') + if n_position // cur_frame * 8 != pre_n_position and cur_frame != -1: + T = 8 # checkpoint frame + P = 14 # checkpoint size + C = d_hid + new_P = int((n_position // cur_frame)**0.5) # testing size + print( + f'Pretraining uses 14x14, but current version is {new_P}x{new_P}') + print('Interpolate the position embedding') + sinusoid_table = sinusoid_table.reshape(-1, T, P, P, C) + sinusoid_table = sinusoid_table.reshape(-1, P, P, + C).permute(0, 3, 1, 2) + sinusoid_table = torch.nn.functional.interpolate( + sinusoid_table, + size=(new_P, new_P), + mode='bicubic', + align_corners=False) + # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C + sinusoid_table = sinusoid_table.permute(0, 2, 3, 1).reshape( + -1, T, new_P, new_P, C) + sinusoid_table = sinusoid_table.flatten(1, 3) + if cur_frame != -1 and cur_frame != 8: + print(f'Pretraining uses 8 frames, but current frame is {cur_frame}') + print('Interpolate the position embedding') + T = 8 # checkpoint frame + new_T = cur_frame # testing frame + # interpolate + P = int((n_position // cur_frame)**0.5) # testing size + C = d_hid + sinusoid_table 
= sinusoid_table.reshape(-1, T, P, P, C) + sinusoid_table = sinusoid_table.permute(0, 2, 3, 4, + 1).reshape(-1, C, + T) # BHW, C, T + sinusoid_table = torch.nn.functional.interpolate( + sinusoid_table, size=new_T, mode='linear') + sinusoid_table = sinusoid_table.reshape(1, P, P, C, new_T).permute( + 0, 4, 1, 2, 3) # B, T, H, W, C + sinusoid_table = sinusoid_table.flatten(1, 3) + if n_position == pre_n_position: + return sinusoid_table + else: + print('Use learnable position embedding') + return nn.Parameter(sinusoid_table, requires_grad=True) + + +@MODELS.register_module() +class UMTViT(nn.Module): + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), + init_values=0., + use_learnable_pos_emb=False, + all_frames=16, + tubelet_size=1, + use_checkpoint=False, + checkpoint_num=0, + use_mean_pooling=True): + super().__init__() + self.num_features = self.embed_dim = embed_dim + self.tubelet_size = tubelet_size + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + num_frames=all_frames, + tubelet_size=self.tubelet_size) + num_patches = self.patch_embed.num_patches + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + print(f'Use checkpoint: {use_checkpoint}') + print(f'Checkpoint number: {checkpoint_num}') + + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches, embed_dim)) + else: + # sine-cosine positional embeddings is on the way + if patch_size == 14: + pre_n_position = 2048 + else: + pre_n_position = 1568 + self.pos_embed = get_sinusoid_encoding_table( + num_patches, + embed_dim, + all_frames // tubelet_size, + pre_n_position=pre_n_position) + + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values) for i in range(depth) + ]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer( + embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + + def forward_features(self, x): + x = self.patch_embed(x) + B, _, _ = x.size() + + if self.pos_embed is not None: + x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to( + x.device).clone().detach() + x = self.pos_drop(x) + + for idx, blk in enumerate(self.blocks): + if self.use_checkpoint and idx < self.checkpoint_num: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + + x = self.norm(x) + if self.fc_norm is not None: + return self.fc_norm(x.mean(1)) + else: + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + return x From 2ddf4b55d03de10eb815e41080de8b83552304af Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Wed, 6 Sep 2023 21:24:10 +0800 Subject: [PATCH 17/24] [Feature] Support MobileOne TSN/TSM (#2656) --- .circleci/test.yml | 2 + .github/workflows/merge_stage_test.yml | 6 + .github/workflows/pr_stage_test.yml | 4 + configs/_base_/models/tsm_mobileone_s4.py | 31 ++++ configs/_base_/models/tsn_mobileone_s0.py | 26 ++++ configs/recognition/tsm/README.md | 2 + configs/recognition/tsm/metafile.yml 
| 24 +++ ...one-s4_8xb16-1x1x16-50e_kinetics400-rgb.py | 126 ++++++++++++++++ ...deploy_8xb16-1x1x16-50e_kinetics400-rgb.py | 5 + configs/recognition/tsn/README.md | 3 + ...one-s4_8xb32-1x1x8-100e_kinetics400-rgb.py | 75 ++++++++++ ...deploy_8xb32-1x1x8-100e_kinetics400-rgb.py | 5 + configs/recognition/tsn/metafile.yml | 23 +++ mmaction/models/backbones/__init__.py | 7 + mmaction/models/backbones/mobileone_tsm.py | 140 ++++++++++++++++++ tests/models/backbones/test_mobileone_tsm.py | 85 +++++++++++ tests/models/recognizers/test_recognizer2d.py | 22 +++ tools/convert/reparameterize_model.py | 57 +++++++ 18 files changed, 643 insertions(+) create mode 100644 configs/_base_/models/tsm_mobileone_s4.py create mode 100644 configs/_base_/models/tsn_mobileone_s0.py create mode 100644 configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.py create mode 100644 configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_deploy_8xb16-1x1x16-50e_kinetics400-rgb.py create mode 100644 configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.py create mode 100644 configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_deploy_8xb32-1x1x8-100e_kinetics400-rgb.py create mode 100644 mmaction/models/backbones/mobileone_tsm.py create mode 100644 tests/models/backbones/test_mobileone_tsm.py create mode 100644 tools/convert/reparameterize_model.py diff --git a/.circleci/test.yml b/.circleci/test.yml index 5c57cd74b9..2d5713cf1a 100644 --- a/.circleci/test.yml +++ b/.circleci/test.yml @@ -66,6 +66,7 @@ jobs: mim install 'mmcv >= 2.0.0' pip install git+https://git@github.com/open-mmlab/mmdetection.git@dev-3.x pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmpretrain.git@dev pip install git+https://github.com/open-mmlab/mmpose.git@dev-1.x pip install -r requirements.txt - run: @@ -126,6 +127,7 @@ jobs: docker exec mmaction pip install git+https://git@github.com/open-mmlab/mmdetection.git@dev-3.x docker exec mmaction pip install git+https://git@github.com/open-mmlab/mmpose.git@dev-1.x docker exec mmaction pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + docker exec mmaction pip install git+https://github.com/open-mmlab/mmpretrain.git@dev docker exec mmaction pip install -r requirements.txt - run: name: Build and install diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml index 0b83911506..de01615037 100644 --- a/.github/workflows/merge_stage_test.yml +++ b/.github/workflows/merge_stage_test.yml @@ -60,6 +60,8 @@ jobs: run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x - name: Install MMCls run: pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + - name: Install MMPretrain + run: pip install git+https://github.com/open-mmlab/mmpretrain.git@dev - name: Install MMPose run: pip install git+https://github.com/open-mmlab/mmpose.git@dev-1.x - name: Install PytorchVideo @@ -122,6 +124,8 @@ jobs: run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x - name: Install MMCls run: pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + - name: Install MMPretrain + run: pip install git+https://github.com/open-mmlab/mmpretrain.git@dev - name: Install MMPose run: pip install git+https://github.com/open-mmlab/mmpose.git@dev-1.x - name: Install unittest dependencies @@ -186,6 +190,7 @@ 
jobs: mim install 'mmcv >= 2.0.0' pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmpretrain.git@dev pip install git+https://github.com/open-mmlab/mmpose.git@dev-1.x pip install -r requirements.txt - name: Install PytorchVideo @@ -228,6 +233,7 @@ jobs: mim install 'mmcv >= 2.0.0' pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmpretrain.git@dev pip install git+https://github.com/open-mmlab/mmpose.git@dev-1.x pip install -r requirements.txt - name: Install PytorchVideo diff --git a/.github/workflows/pr_stage_test.yml b/.github/workflows/pr_stage_test.yml index 2513d38596..63b9558e4b 100644 --- a/.github/workflows/pr_stage_test.yml +++ b/.github/workflows/pr_stage_test.yml @@ -51,6 +51,8 @@ jobs: run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x - name: Install MMCls run: pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + - name: Install MMPretrain + run: pip install git+https://github.com/open-mmlab/mmpretrain.git@dev - name: Install MMPose run: pip install git+https://github.com/open-mmlab/mmpose.git@dev-1.x - name: Install unittest dependencies @@ -119,6 +121,7 @@ jobs: mim install 'mmcv >= 2.0.0' pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmpretrain.git@dev pip install git+https://github.com/open-mmlab/mmpose.git@dev-1.x pip install -r requirements.txt - name: Install PytorchVideo @@ -168,6 +171,7 @@ jobs: mim install 'mmcv >= 2.0.0' pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x + pip install git+https://github.com/open-mmlab/mmpretrain.git@dev pip install git+https://github.com/open-mmlab/mmpose.git@dev-1.x pip install -r requirements.txt - name: Install PytorchVideo diff --git a/configs/_base_/models/tsm_mobileone_s4.py b/configs/_base_/models/tsm_mobileone_s4.py new file mode 100644 index 0000000000..df0c8f8c3c --- /dev/null +++ b/configs/_base_/models/tsm_mobileone_s4.py @@ -0,0 +1,31 @@ +# model settings +preprocess_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) + +checkpoint = ('https://download.openmmlab.com/mmclassification/' + 'v0/mobileone/mobileone-s4_8xb32_in1k_20221110-28d888cb.pth') +model = dict( + type='Recognizer2D', + backbone=dict( + type='MobileOneTSM', + arch='s4', + shift_div=8, + num_segments=8, + is_shift=True, + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone')), + cls_head=dict( + type='TSMHead', + num_segments=8, + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True, + average_clips='prob'), + # model training and testing settings + data_preprocessor=dict(type='ActionDataPreprocessor', **preprocess_cfg), + train_cfg=None, + test_cfg=None) diff --git a/configs/_base_/models/tsn_mobileone_s0.py b/configs/_base_/models/tsn_mobileone_s0.py new file mode 100644 index 0000000000..83a070f143 --- /dev/null +++ b/configs/_base_/models/tsn_mobileone_s0.py @@ -0,0 +1,26 @@ +checkpoint = 
('https://download.openmmlab.com/mmclassification/' + 'v0/mobileone/mobileone-s0_8xb32_in1k_20221110-0bc94952.pth') +model = dict( + type='Recognizer2D', + backbone=dict( + type='mmpretrain.MobileOne', + arch='s0', + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone'), + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=1024, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01, + average_clips='prob'), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + format_shape='NCHW'), + train_cfg=None, + test_cfg=None) diff --git a/configs/recognition/tsm/README.md b/configs/recognition/tsm/README.md index 3014d0e26b..0a02e14cf6 100644 --- a/configs/recognition/tsm/README.md +++ b/configs/recognition/tsm/README.md @@ -30,6 +30,7 @@ The explosive growth in video streaming gives rise to challenges on performing v | 1x1x8 | 224x224 | 8 | ResNet50 (NonLocalGauss) | ImageNet | 73.66 | 90.99 | 8 clips x 10 crop | 59.06G | 28.00M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-7e54dacf.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | | 1x1x8 | 224x224 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 74.34 | 91.23 | 8 clips x 10 crop | 61.30G | 31.68M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-35eddb57.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | | 1x1x8 | 224x224 | 8 | MobileNetV2 | ImageNet | 68.71 | 88.32 | 8 clips x 3 crop | 3.269G | 2.736M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-mobilenetv2_8xb16-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-mobilenetv2_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-mobilenetv2_8xb16-1x1x8-100e_kinetics400-rgb_20230414-401127fd.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-mobilenetv2_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-mobilenetv2_8xb16-1x1x8-100e_kinetics400-rgb.log) | +| 1x1x16 | 224x224 | 8 | MobileOne-S4 | ImageNet | 74.38 | 91.71 | 16 clips x 10 crop | 48.65G | 13.72M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb_20230825-a7f8876b.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.log) | ### Something-something V2 @@ -41,6 +42,7 @@ The explosive growth in video streaming gives rise to challenges on performing v 1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. +3. MoibleOne backbone supports reparameterization during inference. You can use the provided [reparameterize tool](/tools/convert/reparameterize_model.py) to convert the checkpoint and switch to the [deploy config file](/configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_deploy_8xb16-1x1x16-50e_kinetics400-rgb.py). For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). diff --git a/configs/recognition/tsm/metafile.yml b/configs/recognition/tsm/metafile.yml index 409f5a95df..0360c16758 100644 --- a/configs/recognition/tsm/metafile.yml +++ b/configs/recognition/tsm/metafile.yml @@ -167,6 +167,30 @@ Models: Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-7e54dacf.pth + - Name: tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb + Config: configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.py + In Collection: TSM + Metadata: + Architecture: MobileOne-S4 + Batch Size: 16 + Epochs: 100 + FLOPs: 48.65G + Parameters: 13.72M + Pretrained: ImageNet + Resolution: 224x224 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 74.38 + Top 5 Accuracy: 91.71 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb_20230825-a7f8876b.pth + + - Name: tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb Config: configs/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py In Collection: TSM diff --git 
a/configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.py b/configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.py new file mode 100644 index 0000000000..e4fac52656 --- /dev/null +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.py @@ -0,0 +1,126 @@ +_base_ = [ + '../../_base_/models/tsm_mobileone_s4.py', + '../../_base_/default_runtime.py' +] + +model = dict(cls_head=dict(num_segments=16)) +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') + +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +default_hooks = dict(checkpoint=dict(interval=3, max_keep_ckpts=3)) + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=50, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=True, begin=0, end=5), + dict( + type='MultiStepLR', + begin=0, + end=50, + by_epoch=True, + milestones=[25, 45], + gamma=0.1) +] + +optim_wrapper = dict( + constructor='TSMOptimWrapperConstructor', + 
paramwise_cfg=dict(fc_lr5=True), + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00002), + clip_grad=dict(max_norm=20, norm_type=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=True, base_batch_size=128) diff --git a/configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_deploy_8xb16-1x1x16-50e_kinetics400-rgb.py b/configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_deploy_8xb16-1x1x16-50e_kinetics400-rgb.py new file mode 100644 index 0000000000..ecd0ed32e0 --- /dev/null +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-mobileone-s4_deploy_8xb16-1x1x16-50e_kinetics400-rgb.py @@ -0,0 +1,5 @@ +_base_ = [ + './tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.py', # noqa: E501 +] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/recognition/tsn/README.md b/configs/recognition/tsn/README.md index 8ff8222649..ca21386ce2 100644 --- a/configs/recognition/tsn/README.md +++ b/configs/recognition/tsn/README.md @@ -40,6 +40,7 @@ Deep convolutional networks have achieved great success for visual recognition i It's possible and convenient to use a 3rd-party backbone for TSN under the framework of MMAction2, here we provide some examples for: - [x] Backbones from [MMClassification](https://github.com/open-mmlab/mmclassification/) +- [x] Backbones from [MMPretrain](https://github.com/open-mmlab/mmpretrain) - [x] Backbones from [TorchVision](https://github.com/pytorch/vision/) - [x] Backbones from [TIMM (pytorch-image-models)](https://github.com/rwightman/pytorch-image-models) @@ -49,10 +50,12 @@ It's possible and convenient to use a 3rd-party backbone for TSN under the frame | 1x1x3 | MultiStep | 224x224 | 8 | DenseNet161 | ImageNet | 72.07 | 90.15 | 25 clips x 10 crop | 194.6G | 27.36M | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb_20220906-5f4c0daf.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.log) | | 1x1x3 | MultiStep | 224x224 | 8 | Swin Transformer | ImageNet | 77.03 | 92.61 | 25 clips x 10 crop | 386.7G | 87.15M | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb_20220906-65ed814e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.log) | | 1x1x8 | MultiStep | 224x224 | 8 | Swin Transformer | ImageNet | 79.22 | 94.20 | 25 clips x 10 crop | 386.7G | 87.15M | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-swin-transformer_32xb8-1x1x8-50e_kinetics400-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_32xb8-1x1x8-50e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_32xb8-1x1x8-50e_kinetics400-rgb_20230530-428f0064.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_32xb8-1x1x8-50e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_32xb8-1x1x8-50e_kinetics400-rgb.log) | +| 1x1x8 | MultiStep | 224x224 | 8 | MobileOne-S4 | ImageNet | 73.65 | 91.32 | 25 clips x 10 crop | 76G | 13.72M | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb_20230825-2da3c1f7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.log) | 1. Note that some backbones in TIMM are not supported due to multiple reasons. Please refer to [PR #880](https://github.com/open-mmlab/mmaction2/pull/880) for details. 2. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 3. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. +4. MoibleOne backbone supports reparameterization during inference. You can use the provided [reparameterize tool](/tools/convert/reparameterize_model.py) to convert the checkpoint and switch to the [deploy config file](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_deploy_8xb32-1x1x8-100e_kinetics400-rgb.py). 
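The reparameterization note above deserves a concrete illustration. The sketch below mirrors the `test_load_deploy_mobileone` unit test added later in this patch and relies only on APIs that appear in these hunks (`switch_to_deploy`, `save_checkpoint`, `load_checkpoint`, `deploy=True`); the checkpoint filename is a placeholder, and the command-line interface of `tools/convert/reparameterize_model.py` is not reproduced here because it does not appear in these hunks. It is shown for the TSM variant; the plain `mmpretrain.MobileOne` backbone used by TSN inherits the same `switch_to_deploy()`.

```python
from mmengine.runner import load_checkpoint, save_checkpoint

from mmaction.models.backbones.mobileone_tsm import MobileOneTSM

# Fuse the multi-branch training-time backbone into its single-branch deploy form.
model = MobileOneTSM('s4', pretrained2d=False)
model.switch_to_deploy()      # mmpretrain MobileOne API, inherited by MobileOneTSM
model.eval()
save_checkpoint(model.state_dict(), 'mobileone_tsm_s4_deploy.pth')  # placeholder path

# A model built with `deploy=True` (as in the *_deploy_* configs) loads the fused weights.
deploy_model = MobileOneTSM('s4', pretrained2d=False, deploy=True)
load_checkpoint(deploy_model, 'mobileone_tsm_s4_deploy.pth')
```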
For more details on data preparation, you can refer to diff --git a/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.py b/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.py new file mode 100644 index 0000000000..5f07bf40ab --- /dev/null +++ b/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.py @@ -0,0 +1,75 @@ +_base_ = ['../tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py'] + +# dataset settings +checkpoint = ('https://download.openmmlab.com/mmclassification/' + 'v0/mobileone/mobileone-s4_8xb32_in1k_20221110-28d888cb.pth') +model = dict( + backbone=dict( + type='mmpretrain.MobileOne', + arch='s4', + out_indices=(3, ), + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone'), + _delete_=True), + cls_head=dict(in_channels=2048)) + +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') + +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) diff --git a/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_deploy_8xb32-1x1x8-100e_kinetics400-rgb.py b/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_deploy_8xb32-1x1x8-100e_kinetics400-rgb.py new file mode 100644 index 0000000000..38ab106a3f --- /dev/null +++ b/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_deploy_8xb32-1x1x8-100e_kinetics400-rgb.py @@ -0,0 +1,5 @@ +_base_ = [ + './tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.py' # noqa: E501 +] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/recognition/tsn/metafile.yml b/configs/recognition/tsn/metafile.yml index 378040098c..06822d633c 100644 --- a/configs/recognition/tsn/metafile.yml +++ b/configs/recognition/tsn/metafile.yml @@ -215,6 +215,29 @@ 
Models: Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_32xb8-1x1x8-50e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_32xb8-1x1x8-50e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb_20220906-65ed814e.pth + - Name: tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb + Config: configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.py + In Collection: TSN + Metadata: + Architecture: MobileOne-S4 + Batch Size: 32 + Epochs: 100 + FLOPs: 76G + Parameters: 13.72M + Pretrained: ImageNet + Resolution: 224x224 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 73.65 + Top 5 Accuracy: 91.32 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb_20230825-2da3c1f7.pth + - Name: tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb Config: configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py In Collection: TSN diff --git a/mmaction/models/backbones/__init__.py b/mmaction/models/backbones/__init__.py index 2f4eb4a7e3..8a69a057d6 100644 --- a/mmaction/models/backbones/__init__.py +++ b/mmaction/models/backbones/__init__.py @@ -33,3 +33,10 @@ 'TimeSformer', 'UniFormer', 'UniFormerV2', 'VisionTransformer', 'X3D', 'RGBPoseConv3D' ] + +try: + from .mobileone_tsm import MobileOneTSM # noqa: F401 + __all__.append('MobileOneTSM') + +except (ImportError, ModuleNotFoundError): + pass diff --git a/mmaction/models/backbones/mobileone_tsm.py b/mmaction/models/backbones/mobileone_tsm.py new file mode 100644 index 0000000000..96722faf68 --- /dev/null +++ b/mmaction/models/backbones/mobileone_tsm.py @@ -0,0 +1,140 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import torch.nn as nn +from mmengine.logging import MMLogger +from mmengine.runner.checkpoint import (_load_checkpoint, + _load_checkpoint_with_prefix) +from mmpretrain.models import MobileOne + +from mmaction.registry import MODELS +from .resnet_tsm import TemporalShift + + +@MODELS.register_module() +class MobileOneTSM(MobileOne): + """MobileOne backbone for TSM. + + Args: + arch (str | dict): MobileOne architecture. If use string, choose + from 's0', 's1', 's2', 's3' and 's4'. If use dict, it should + have below keys: + + - num_blocks (Sequence[int]): Number of blocks in each stage. + - width_factor (Sequence[float]): Width factor in each stage. + - num_conv_branches (Sequence[int]): Number of conv branches + in each stage. + - num_se_blocks (Sequence[int]): Number of SE layers in each + stage, all the SE layers are placed in the subsequent order + in each stage. + + Defaults to 's0'. + num_segments (int): Number of frame segments. Defaults to 8. + is_shift (bool): Whether to make temporal shift in reset layers. + Defaults to True. + shift_div (int): Number of div for shift. Defaults to 8. 
+ pretraind2d (bool): Whether to load pretrained 2D model. + Defaults to True. + **kwargs (keyword arguments, optional): Arguments for MobileOne. + """ + + def __init__(self, + arch: str, + num_segments: int = 8, + is_shift: bool = True, + shift_div: int = 8, + pretrained2d: bool = True, + **kwargs): + super().__init__(arch, **kwargs) + self.num_segments = num_segments + self.is_shift = is_shift + self.shift_div = shift_div + self.pretrained2d = pretrained2d + self.init_structure() + + def make_temporal_shift(self): + """Make temporal shift for some layers. + + To make reparameterization work, we can only build the shift layer + before the 'block', instead of the 'blockres' + """ + + def make_block_temporal(stage, num_segments): + """Make temporal shift on some blocks. + + Args: + stage (nn.Module): Model layers to be shifted. + num_segments (int): Number of frame segments. + + Returns: + nn.Module: The shifted blocks. + """ + blocks = list(stage.children()) + for i, b in enumerate(blocks): + blocks[i] = TemporalShift( + b, num_segments=num_segments, shift_div=self.shift_div) + return nn.Sequential(*blocks) + + self.stage0 = make_block_temporal( + nn.Sequential(self.stage0), self.num_segments)[0] + for i in range(1, 5): + temporal_stage = make_block_temporal( + getattr(self, f'stage{i}'), self.num_segments) + setattr(self, f'stage{i}', temporal_stage) + + def init_structure(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + if self.is_shift: + self.make_temporal_shift() + + def load_original_weights(self, logger): + assert self.init_cfg.get('type') == 'Pretrained', ( + 'Please specify ' + 'init_cfg to use pretrained 2d checkpoint') + self.pretrained = self.init_cfg.get('checkpoint') + prefix = self.init_cfg.get('prefix') + if prefix is not None: + original_state_dict = _load_checkpoint_with_prefix( + prefix, self.pretrained, map_location='cpu') + else: + original_state_dict = _load_checkpoint( + self.pretrained, map_location='cpu') + if 'state_dict' in original_state_dict: + original_state_dict = original_state_dict['state_dict'] + + wrapped_layers_map = dict() + for name, module in self.named_modules(): + ori_name = name + for wrap_prefix in ['.net']: + if wrap_prefix in ori_name: + ori_name = ori_name.replace(wrap_prefix, '') + wrapped_layers_map[ori_name] = name + + # convert wrapped keys + for param_name in list(original_state_dict.keys()): + layer_name = '.'.join(param_name.split('.')[:-1]) + if layer_name in wrapped_layers_map: + wrapped_name = param_name.replace( + layer_name, wrapped_layers_map[layer_name]) + original_state_dict[wrapped_name] = original_state_dict.pop( + param_name) + + msg = self.load_state_dict(original_state_dict, strict=True) + logger.info(msg) + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + if self.pretrained2d: + logger = MMLogger.get_current_instance() + self.load_original_weights(logger) + else: + super().init_weights() + + def forward(self, x): + """unpack tuple result.""" + x = super().forward(x) + if isinstance(x, tuple): + assert len(x) == 1 + x = x[0] + return x diff --git a/tests/models/backbones/test_mobileone_tsm.py b/tests/models/backbones/test_mobileone_tsm.py new file mode 100644 index 0000000000..b018e9f5a2 --- /dev/null +++ b/tests/models/backbones/test_mobileone_tsm.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
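The `load_original_weights` method above has to undo the extra `.net` level that `TemporalShift` introduces before a plain 2D MobileOne checkpoint can be loaded with `strict=True`. A minimal, self-contained sketch of that renaming idea (the parameter names below are made up for illustration, and the mapping is simplified to key level rather than the module level used above):

```python
# Sketch of the ".net" key remapping performed by load_original_weights above.
# Keys on the left mimic a plain 2D checkpoint; keys on the right mimic the
# TSM-wrapped model, where TemporalShift wraps each block under ".net".
ckpt_2d = {
    'stage1.0.conv.weight': 'w',  # illustrative parameter names, not real ones
    'stage1.0.conv.bias': 'b',
}
wrapped_model_keys = ['stage1.0.net.conv.weight', 'stage1.0.net.conv.bias']

# "original name -> wrapped name", built by stripping the ".net" infix,
# mirroring (in simplified form) the wrapped_layers_map construction above.
name_map = {k.replace('.net', ''): k for k in wrapped_model_keys}

remapped = {name_map.get(k, k): v for k, v in ckpt_2d.items()}
assert set(remapped) == set(wrapped_model_keys)
```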
+import os +import tempfile + +import torch +from mmengine.runner import load_checkpoint, save_checkpoint +from mmengine.runner.checkpoint import _load_checkpoint_with_prefix + +from mmaction.models.backbones.mobileone_tsm import MobileOneTSM +from mmaction.testing import generate_backbone_demo_inputs + + +def test_mobileone_tsm_backbone(): + """Test MobileOne TSM backbone.""" + + from mmpretrain.models.backbones.mobileone import MobileOneBlock + + from mmaction.models.backbones.resnet_tsm import TemporalShift + + model = MobileOneTSM('s0', pretrained2d=False) + model.init_weights() + for cur_module in model.modules(): + if isinstance(cur_module, TemporalShift): + # TemporalShift is a wrapper of MobileOneBlock + assert isinstance(cur_module.net, MobileOneBlock) + assert cur_module.num_segments == model.num_segments + assert cur_module.shift_div == model.shift_div + + inputs = generate_backbone_demo_inputs((8, 3, 64, 64)) + + feat = model(inputs) + assert feat.shape == torch.Size([8, 1024, 2, 2]) + + model = MobileOneTSM('s1', pretrained2d=False) + feat = model(inputs) + assert feat.shape == torch.Size([8, 1280, 2, 2]) + + model = MobileOneTSM('s2', pretrained2d=False) + feat = model(inputs) + assert feat.shape == torch.Size([8, 2048, 2, 2]) + + model = MobileOneTSM('s3', pretrained2d=False) + feat = model(inputs) + assert feat.shape == torch.Size([8, 2048, 2, 2]) + + model = MobileOneTSM('s4', pretrained2d=False) + feat = model(inputs) + assert feat.shape == torch.Size([8, 2048, 2, 2]) + + +def test_mobileone_init_weight(): + checkpoint = ('https://download.openmmlab.com/mmclassification/v0' + '/mobileone/mobileone-s0_8xb32_in1k_20221110-0bc94952.pth') + # ckpt = torch.load(checkpoint)['state_dict'] + model = MobileOneTSM( + arch='s0', + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone')) + model.init_weights() + ori_ckpt = _load_checkpoint_with_prefix( + 'backbone', model.init_cfg['checkpoint'], map_location='cpu') + for name, param in model.named_parameters(): + ori_name = name.replace('.net', '') + assert torch.allclose(param, ori_ckpt[ori_name]), \ + f'layer {name} fail to load from pretrained checkpoint' + + +def test_load_deploy_mobileone(): + # Test output before and load from deploy checkpoint + model = MobileOneTSM('s0', pretrained2d=False) + inputs = generate_backbone_demo_inputs((8, 3, 64, 64)) + tmpdir = tempfile.gettempdir() + ckpt_path = os.path.join(tmpdir, 'ckpt.pth') + model.switch_to_deploy() + model.eval() + outputs = model(inputs) + + model_deploy = MobileOneTSM('s0', pretrained2d=False, deploy=True) + save_checkpoint(model.state_dict(), ckpt_path) + load_checkpoint(model_deploy, ckpt_path) + + outputs_load = model_deploy(inputs) + for feat, feat_load in zip(outputs, outputs_load): + assert torch.allclose(feat, feat_load) + os.remove(ckpt_path) diff --git a/tests/models/recognizers/test_recognizer2d.py b/tests/models/recognizers/test_recognizer2d.py index 3a13b0ef37..9c48877204 100644 --- a/tests/models/recognizers/test_recognizer2d.py +++ b/tests/models/recognizers/test_recognizer2d.py @@ -90,6 +90,16 @@ def test_tsn_mmcls_backbone(): train_test_step(config, input_shape) +def test_tsn_mobileone(): + register_all_modules() + config = get_recognizer_cfg( + 'tsn/custom_backbones/tsn_imagenet-pretrained-mobileone-s4_8xb32-1x1x8-100e_kinetics400-rgb.py' # noqa: E501 + ) + config.model['backbone']['init_cfg'] = None + input_shape = (1, 3, 3, 32, 32) + train_test_step(config, input_shape) + + def test_tsn_timm_backbone(): # test tsn from timm 
register_all_modules() @@ -142,6 +152,7 @@ def test_tsn_tv_backbone(): def test_tsm(): register_all_modules() + # test tsm-mobilenetv2 config = get_recognizer_cfg( 'tsm/tsm_imagenet-pretrained-mobilenetv2_8xb16-1x1x8-100e_kinetics400-rgb.py' # noqa: E501 ) @@ -151,6 +162,7 @@ def test_tsm(): input_shape = (1, 8, 3, 32, 32) train_test_step(config, input_shape) + # test tsm-res50 config = get_recognizer_cfg( 'tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py') config.model['backbone']['pretrained'] = None @@ -159,6 +171,16 @@ def test_tsm(): input_shape = (1, 8, 3, 32, 32) train_test_step(config, input_shape) + # test tsm-mobileone + config = get_recognizer_cfg( + 'tsm/tsm_imagenet-pretrained-mobileone-s4_8xb16-1x1x16-50e_kinetics400-rgb.py' # noqa: E501 + ) + config.model['backbone']['init_cfg'] = None + config.model['backbone']['pretrained2d'] = None + + input_shape = (1, 16, 3, 32, 32) + train_test_step(config, input_shape) + def test_trn(): register_all_modules() diff --git a/tools/convert/reparameterize_model.py b/tools/convert/reparameterize_model.py new file mode 100644 index 0000000000..6220e092fc --- /dev/null +++ b/tools/convert/reparameterize_model.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from pathlib import Path + +import torch + +from mmaction.apis import init_recognizer +from mmaction.models.recognizers import BaseRecognizer + + +def convert_recoginzer_to_deploy(model, checkpoint, save_path): + print('Converting...') + assert hasattr(model, 'backbone') and \ + hasattr(model.backbone, 'switch_to_deploy'), \ + '`model.backbone` must has method of "switch_to_deploy".' \ + f' But {model.backbone.__class__} does not have.' + + model.backbone.switch_to_deploy() + checkpoint['state_dict'] = model.state_dict() + torch.save(checkpoint, save_path) + + print('Done! Save at path "{}"'.format(save_path)) + + +def main(): + parser = argparse.ArgumentParser( + description='Convert the parameters of the repvgg block ' + 'from training mode to deployment mode.') + parser.add_argument( + 'config_path', + help='The path to the configuration file of the network ' + 'containing the repvgg block.') + parser.add_argument( + 'checkpoint_path', + help='The path to the checkpoint file corresponding to the model.') + parser.add_argument( + 'save_path', + help='The path where the converted checkpoint file is stored.') + args = parser.parse_args() + + save_path = Path(args.save_path) + if save_path.suffix != '.pth' and save_path.suffix != '.tar': + print('The path should contain the name of the pth format file.') + exit() + save_path.parent.mkdir(parents=True, exist_ok=True) + + model = init_recognizer( + args.config_path, checkpoint=args.checkpoint_path, device='cpu') + assert isinstance(model, BaseRecognizer), \ + '`model` must be a `mmpretrain.classifiers.ImageClassifier` instance.' 
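    # Example invocation of this tool (paths are placeholders, not taken from
    # the repo's docs): pass the training-mode config and its checkpoint, and
    # the reparameterized (deploy-mode) weights are written to the save path.
    #   python tools/convert/reparameterize_model.py \
    #       <train_config>.py <train_checkpoint>.pth <reparameterized>.pth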
+ + checkpoint = torch.load(args.checkpoint_path) + convert_recoginzer_to_deploy(model, checkpoint, args.save_path) + + +if __name__ == '__main__': + main() From dd1901a89b379a7109868d304f8597f38da59db4 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Wed, 6 Sep 2023 21:26:18 +0800 Subject: [PATCH 18/24] [Fix] Fix resnet audio ut (#2637) --- tests/models/backbones/test_resnet_audio.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/models/backbones/test_resnet_audio.py b/tests/models/backbones/test_resnet_audio.py index 6c22bd137a..48af1744f5 100644 --- a/tests/models/backbones/test_resnet_audio.py +++ b/tests/models/backbones/test_resnet_audio.py @@ -3,6 +3,7 @@ from mmaction.models import ResNetAudio from mmaction.testing import generate_backbone_demo_inputs +from mmaction.utils import register_all_modules def test_resnet_audio_backbone(): @@ -10,6 +11,7 @@ def test_resnet_audio_backbone(): input_shape = (1, 1, 16, 16) spec = generate_backbone_demo_inputs(input_shape) # inference + register_all_modules() audioonly = ResNetAudio(50, None) audioonly.init_weights() audioonly.train() From baf385eeac775aa56aaf1066792f648476e5d517 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Thu, 7 Sep 2023 14:50:43 +0800 Subject: [PATCH 19/24] [Feature] Support VindLU multi-modality algorithm (#2667) --- .circleci/test.yml | 8 + .github/workflows/merge_stage_test.yml | 8 + configs/multimodal/vindlu/README.md | 87 + configs/multimodal/vindlu/metafile.yml | 55 + ...ndlu_beit-base_8x16_retrieval_msrvtt-9k.py | 200 ++ .../vindlu_beit-base_8x8_vqa_msrvtt-qa.py | 190 ++ .../vindlu_beit-base_vqa-mc_msrvtt-mc.py | 80 + mmaction/datasets/__init__.py | 4 +- mmaction/datasets/msrvtt_datasets.py | 116 ++ mmaction/datasets/transforms/formatting.py | 18 +- mmaction/datasets/transforms/processing.py | 5 +- mmaction/engine/runner/__init__.py | 5 +- mmaction/engine/runner/retrieval_loop.py | 168 ++ mmaction/evaluation/metrics/__init__.py | 4 +- .../evaluation/metrics/multimodal_metric.py | 565 ++++++ mmaction/models/__init__.py | 1 + mmaction/models/multimodal/__init__.py | 12 + mmaction/models/multimodal/vindlu/__init__.py | 12 + mmaction/models/multimodal/vindlu/beit3d.py | 350 ++++ .../models/multimodal/vindlu/modeling_bert.py | 1740 +++++++++++++++++ .../multimodal/vindlu/temporal_model.py | 213 ++ .../models/multimodal/vindlu/tokenizer.py | 45 + mmaction/models/multimodal/vindlu/utils.py | 195 ++ mmaction/models/multimodal/vindlu/vindlu.py | 227 +++ .../models/multimodal/vindlu/vindlu_ret.py | 464 +++++ .../models/multimodal/vindlu/vindlu_ret_mc.py | 87 + .../models/multimodal/vindlu/vindlu_vqa.py | 266 +++ mmaction/models/multimodal/vindlu/xbert.py | 40 + mmaction/registry.py | 8 +- mmaction/utils/__init__.py | 13 +- mmaction/utils/dependency.py | 81 + mmaction/utils/progress.py | 40 + requirements/multimodal.txt | 1 + setup.py | 1 + .../metrics/test_retrieval_metric.py | 118 +- tools/data/msrvtt/README.md | 68 + tools/data/msrvtt/README_zh-CN.md | 68 + tools/data/msrvtt/compress.py | 192 ++ tools/data/msrvtt/compress_msrvtt.sh | 10 + tools/data/msrvtt/download_msrvtt.sh | 22 + tools/data/video_retrieval/README_zh-CN.md | 2 +- 41 files changed, 5768 insertions(+), 21 deletions(-) create mode 100644 configs/multimodal/vindlu/README.md create mode 100644 configs/multimodal/vindlu/metafile.yml create mode 100644 configs/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k.py create mode 100644 
configs/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa.py create mode 100644 configs/multimodal/vindlu/vindlu_beit-base_vqa-mc_msrvtt-mc.py create mode 100644 mmaction/datasets/msrvtt_datasets.py create mode 100644 mmaction/engine/runner/retrieval_loop.py create mode 100644 mmaction/evaluation/metrics/multimodal_metric.py create mode 100644 mmaction/models/multimodal/__init__.py create mode 100644 mmaction/models/multimodal/vindlu/__init__.py create mode 100644 mmaction/models/multimodal/vindlu/beit3d.py create mode 100644 mmaction/models/multimodal/vindlu/modeling_bert.py create mode 100644 mmaction/models/multimodal/vindlu/temporal_model.py create mode 100644 mmaction/models/multimodal/vindlu/tokenizer.py create mode 100644 mmaction/models/multimodal/vindlu/utils.py create mode 100644 mmaction/models/multimodal/vindlu/vindlu.py create mode 100644 mmaction/models/multimodal/vindlu/vindlu_ret.py create mode 100644 mmaction/models/multimodal/vindlu/vindlu_ret_mc.py create mode 100644 mmaction/models/multimodal/vindlu/vindlu_vqa.py create mode 100644 mmaction/models/multimodal/vindlu/xbert.py create mode 100644 mmaction/utils/dependency.py create mode 100644 mmaction/utils/progress.py create mode 100644 requirements/multimodal.txt create mode 100644 tools/data/msrvtt/README.md create mode 100644 tools/data/msrvtt/README_zh-CN.md create mode 100644 tools/data/msrvtt/compress.py create mode 100644 tools/data/msrvtt/compress_msrvtt.sh create mode 100644 tools/data/msrvtt/download_msrvtt.sh diff --git a/.circleci/test.yml b/.circleci/test.yml index 2d5713cf1a..169bba2778 100644 --- a/.circleci/test.yml +++ b/.circleci/test.yml @@ -73,6 +73,10 @@ jobs: name: Install timm command: | pip install timm + - run: + name: Install transformers + command: | + pip install transformers - when: condition: equal: [ "0.10.0", << parameters.torchvision >> ] @@ -118,6 +122,10 @@ jobs: command: | docker exec mmaction pip install timm docker exec mmaction python -m pip install pytorchvideo + - run: + name: Install transformers + command: | + docker exec mmaction pip install transformers - run: name: Install mmaction dependencies command: | diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml index de01615037..0a0222903a 100644 --- a/.github/workflows/merge_stage_test.yml +++ b/.github/workflows/merge_stage_test.yml @@ -69,6 +69,8 @@ jobs: if: ${{matrix.torchvision == '0.10.0'}} - name: Install timm run: pip install timm + - name: Install transformers + run: pip install transformers - name: Build and install run: rm -rf .eggs && pip install -e . - name: Run unittests and generate coverage report @@ -110,6 +112,8 @@ jobs: run: pip install lmdb - name: Install timm run: pip install timm + - name: Install transformers + run: pip install transformers - name: Install TurboJpeg lib run: sudo apt-get install -y libturbojpeg - name: Install PyTorch @@ -183,6 +187,8 @@ jobs: run: pip install librosa soundfile - name: Install lmdb run: pip install lmdb + - name: Install transformers + run: pip install transformers - name: Install mmaction dependencies run: | pip install git+https://github.com/open-mmlab/mmengine.git@main @@ -240,6 +246,8 @@ jobs: run: python -m pip install pytorchvideo - name: Install timm run: python -m pip install timm + - name: Install transformers + run: python -m pip install transformers - name: Build and install run: | pip install -e . 
-v diff --git a/configs/multimodal/vindlu/README.md b/configs/multimodal/vindlu/README.md new file mode 100644 index 0000000000..c49fed61fa --- /dev/null +++ b/configs/multimodal/vindlu/README.md @@ -0,0 +1,87 @@ +# VindLU + +[VindLU: A Recipe for Effective Video-and-Language Pretraining](https://arxiv.org/abs/2212.05051) + + + +## Abstract + + + +The last several years have witnessed remarkable progress in video-and-language (VidL) understanding. However, most modern VidL approaches use complex and specialized model architectures and sophisticated pretraining protocols, making the reproducibility, analysis and comparisons of these frameworks difficult. Hence, instead of proposing yet another new VidL model, this paper conducts a thorough empirical study demystifying the most important factors in the VidL model design. Among the factors that we investigate are (i) the spatiotemporal architecture design, (ii) the multimodal fusion schemes, (iii) the pretraining objectives, (iv) the choice of pretraining data, (v) pretraining and finetuning protocols, and (vi) dataset and model scaling. Our empirical study reveals that the most important design factors include: temporal modeling, video-to-text multimodal fusion, masked modeling objectives, and joint training on images and videos. Using these empirical insights, we then develop a step-by-step recipe, dubbed VindLU, for effective VidL pretraining. Our final model trained using our recipe achieves comparable or better than state-of-the-art results on several VidL tasks without relying on external CLIP pretraining. In particular, on the text-to-video retrieval task, our approach obtains 61.2% on DiDeMo, and 55.0% on ActivityNet, outperforming current SOTA by 7.8% and 6.1% respectively. Furthermore, our model also obtains state-of-the-art video question-answering results on ActivityNet-QA, MSRVTT-QA, MSRVTT-MC and TVQA. Our code and pretrained models are publicly available at: https://github.com/klauscc/VindLU. + + + +
+ +
+ +## Results and Models + +### Video Retrieval on MSRVTT-9k + +| frame sampling strategy | resolution | gpus | vision encoder | text encoder | pretraining | Recall@1 | config | ckpt | log | +| :---------------------: | :--------: | :--: | :------------: | :----------: | :--------------------: | :------: | :-----------------------------------: | :---------------------------------: | :---------------------------------: | +| uniform 12 | 224x224 | 8 | BEiT-Base | Bert-Base | C5M (WebVid-2M + CC3M) | 44.0 | [config](/configs/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k/vindlu_beit-base_8x16_retrieval_msrvtt-9k_20230905-fc36231e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k/vindlu_beit-base_8x16_retrieval_msrvtt-9k.log) | + +### Video Question-Answering on MSRVTT-QA + +| frame sampling strategy | resolution | gpus | vision encoder | text encoder | pretraining | top1 acc | config | ckpt | log | +| :---------------------: | :--------: | :--: | :------------: | :----------: | :--------------------: | :------: | :-----------------------------------: | :---------------------------------: | :---------------------------------: | +| uniform 12 | 224x224 | 8 | BEiT-Base | Bert-Base | C5M (WebVid-2M + CC3M) | 43.6 | [config](/configs/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa/vindlu_beit-base_8x8_vqa_msrvtt-qa_20230906-6e693e64.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa/vindlu_beit-base_8x8_vqa_msrvtt-qa.log) | + +### Multiple-Choice Question-Answering on MSRVTT-MC (Inference) + +| frame sampling strategy | resolution | gpus | vision encoder | text encoder | pretraining | top1 acc | config | ckpt | +| :---------------------: | :--------: | :--: | :------------: | :----------: | :--------------------: | :------: | :----------------------------------------------------: | :---------------------------------------------------: | +| uniform 12 | 224x224 | 8 | BEiT-Base | Bert-Base | C5M (WebVid-2M + CC3M) | 97.6 | [config](/configs/multimodal/vindlu/vindlu_beit-base_vqa-mc_msrvtt-mc.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k/vindlu_beit-base_8x16_retrieval_msrvtt-9k_20230905-fc36231e.pth) | + +1. Currently, we only support the fine-tuning stage of VindLU models based on the pretrained checkpoint provided by the [original repo](https://github.com/klauscc/VindLU). + +For more details on data preparation, you can refer to [prepare msrvtt](/tools/data/msrvtt/README.md). + +## Train + +You can use the following command to train a model. + +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train VindLU model on MSRVTT-9k dataset in a deterministic option with periodic validation. + +```shell +python tools/train.py configs/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k.py \ + --seed 0 --deterministic +``` + +For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/train_test.md). + +## Test + +You can use the following command to test a model. 
+ +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test CLIP4Clip model on MSRVTT-9k dataset and dump the result to a pkl file. + +```shell +python tools/test.py cconfigs/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k.py \ + checkpoints/SOME_CHECKPOINT.pth --dump result.pkl +``` + +For more details, you can refer to the **Test** part in the [Training and Test Tutorial](/docs/en/user_guides/train_test.md). + +## Citation + +```BibTeX +@inproceedings{cheng2023vindlu, + title={Vindlu: A recipe for effective video-and-language pretraining}, + author={Cheng, Feng and Wang, Xizi and Lei, Jie and Crandall, David and Bansal, Mohit and Bertasius, Gedas}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10739--10750}, + year={2023} +} +``` diff --git a/configs/multimodal/vindlu/metafile.yml b/configs/multimodal/vindlu/metafile.yml new file mode 100644 index 0000000000..d7fdf7fe24 --- /dev/null +++ b/configs/multimodal/vindlu/metafile.yml @@ -0,0 +1,55 @@ +Collections: + - Name: VindLU + README: configs/multimodal/vindlu/README.md + Paper: + URL: https://arxiv.org/abs/2212.05051 + Title: 'VindLU: A Recipe for Effective Video-and-Language Pretraining' + +Models: + - Name: vindlu_beit-base_8x16_retrieval_msrvtt-9k + Config: configs/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k.py + In Collection: VindLU + Metadata: + Architecture: BEiT-Base + Batch Size: 16 + Epochs: 5 + Training Data: MSRVTT-9k + Training Resources: 8 GPUs + Results: + Dataset: MSRVTT + Task: Video Retrieval + Metrics: + Recall@1: 44.0 + Recall@5: 70.6 + Recall@10: 80.0 + Training Log: https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k/vindlu_beit-base_8x16_retrieval_msrvtt-9k.log + Weights: https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k/vindlu_beit-base_8x16_retrieval_msrvtt-9k_20230905-fc36231e.pth + + - Name: vindlu_beit-base_8x8_vqa_msrvtt-qa + Config: configs/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa.py + In Collection: VindLU + Metadata: + Architecture: BEiT-Base + Batch Size: 8 + Epochs: 10 + Training Data: MSRVTT-qa + Training Resources: 8 GPUs + Results: + Dataset: MSRVTT + Task: Video Question-Answering + Metrics: + Top 1 Accuracy: 43.6 + Training Log: https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa/vindlu_beit-base_8x8_vqa_msrvtt-qa.log + Weights: https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa/vindlu_beit-base_8x8_vqa_msrvtt-qa_20230906-6e693e64.pth + + - Name: vindlu_beit-base_vqa-mc_msrvtt-mc + Config: configs/multimodal/vindlu/vindlu_beit-base_vqa-mc_msrvtt-mc.py + In Collection: VindLU + Metadata: + Architecture: BEiT-Base + Results: + Dataset: MSRVTT-MC + Task: Multiple-Choice Question-Answering + Metrics: + Top 1 Accuracy: 97.6 + Weights: https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k/vindlu_beit-base_8x16_retrieval_msrvtt-9k_20230905-fc36231e.pth diff --git a/configs/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k.py b/configs/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k.py new file mode 100644 index 0000000000..fd20acbc24 --- /dev/null +++ b/configs/multimodal/vindlu/vindlu_beit-base_8x16_retrieval_msrvtt-9k.py @@ -0,0 +1,200 @@ +_base_ = ['../../_base_/default_runtime.py'] + 
+video_root = 'data/msrvtt/videos_2fps_224' +anno_file_train = 'data/msrvtt/annotations/msrvtt_ret_train9k.json' +anno_file_test = 'data/msrvtt/annotations/msrvtt_ret_test1k.json' +pretrained_ckpt_url = 'https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_c5m_pretrain.pth' # noqa: E501 + +# model settings +model = dict( + type='VindLURetrieval', + gradient_checkpointing=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained_ckpt_url), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[128], + std=[128], + format_shape='NCTHW'), + tokenizer=dict( + type='VindLUTokenizer', + pretrained_model_name_or_path='bert-base-uncased'), + vision_encoder=dict( + type='BeitModel3D', + config='microsoft/beit-base-patch16-224-pt22k-ft22k', + tem_config=dict( + num_frames=12, + temporal_model_block='timesformer', + temporal_model_position='last', + temporal_model_config=dict(input_dim=768), + use_temporal_position_embedding=True), + encoder_width=768, + add_ln=True), + text_encoder=dict( + type='XBertModel', + pretrained_model_name_or_path='bert-base-uncased', + encoder_width=768, + fusion_layer=9, + add_pooling_layer=False), + proj_dim=256, + temperature=0.07, + max_txt_len=32, + topk=128) + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=12, + out_of_bound_opt='repeat_last', + ), + dict(type='DecordDecode'), + dict(type='RandomResizedCrop', area_range=(0.5, 1.0)), + dict( + type='Resize', + scale=(224, 224), + keep_ratio=False, + interpolation='bicubic'), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict( + type='PackActionInputs', + algorithm_keys=( + 'text', + 'gt_video_id', + 'gt_text_id', + )) +] + +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=12, + test_mode=True, + out_of_bound_opt='repeat_last'), + dict(type='DecordDecode'), + dict( + type='Resize', + scale=(224, 224), + keep_ratio=False, + interpolation='bicubic'), + dict(type='FormatShape', input_format='NCHW'), + dict( + type='PackActionInputs', + algorithm_keys=( + 'text', + 'gt_video_id', + 'gt_text_id', + )) +] + +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=12, + test_mode=True, + out_of_bound_opt='repeat_last'), + dict(type='DecordDecode'), + dict( + type='Resize', + scale=(224, 224), + keep_ratio=False, + interpolation='bicubic'), + dict(type='FormatShape', input_format='NCHW'), + dict( + type='PackActionInputs', + algorithm_keys=( + 'text', + 'gt_video_id', + 'gt_text_id', + )) +] + +dataset_type = 'MSRVTTRetrieval' + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=anno_file_train, + pipeline=train_pipeline, + data_prefix=dict(video=video_root), + )) + +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=anno_file_test, + pipeline=test_pipeline, + data_prefix=dict(video=video_root), + )) + +test_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + 
type=dataset_type, + ann_file=anno_file_test, + pipeline=test_pipeline, + data_prefix=dict(video=video_root), + )) + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=5, val_begin=1, val_interval=1) +val_cfg = dict(type='RetrievalValLoop') +test_cfg = dict(type='RetrievalTestLoop') + +val_evaluator = dict(type='RetrievalRecall', topk=(1, 5, 10)) +test_evaluator = dict(type='RetrievalRecall', topk=(1, 5, 10)) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=5, + eta_min_ratio=0.01, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.02), + paramwise_cfg=dict( + bypass_duplicate=True, norm_decay_mult=0.0, bias_decay_mult=0.0), + clip_grad=dict(max_norm=50, norm_type=2), +) + +model_wrapper_cfg = dict(type='MMDistributedDataParallel', static_graph=True) + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=1, + save_best='t2i/retrieval/Recall@1', + rule='greater'), + logger=dict(type='LoggerHook', interval=20, ignore_last=False)) + +auto_scale_lr = dict(enable=True, base_batch_size=128) + +find_unused_parameters = True + +custom_hooks = [dict(type='EmptyCacheHook', after_epoch=True)] diff --git a/configs/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa.py b/configs/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa.py new file mode 100644 index 0000000000..461b045cdb --- /dev/null +++ b/configs/multimodal/vindlu/vindlu_beit-base_8x8_vqa_msrvtt-qa.py @@ -0,0 +1,190 @@ +_base_ = ['../../_base_/default_runtime.py'] + +video_root = 'data/msrvtt/videos_2fps_224' +anno_file_train = 'data/msrvtt/annotations/msrvtt_qa_train.json' +anno_file_val = 'data/msrvtt/annotations/msrvtt_qa_val.json' +anno_file_test = 'data/msrvtt/annotations/msrvtt_qa_test.json' +answer_list_file = 'data/msrvtt/annotations/msrvtt_qa_answer_list.json' +pretrained_ckpt_url = 'https://download.openmmlab.com/mmaction/v1.0/multimodal/vindlu/vindlu_c5m_pretrain.pth' # noqa: E501 + +# model settings +model = dict( + type='VindLUVQA', + init_cfg=dict(type='Pretrained', checkpoint=pretrained_ckpt_url), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[128], + std=[128], + format_shape='NCTHW'), + tokenizer=dict( + type='VindLUTokenizer', + pretrained_model_name_or_path='bert-base-uncased', + ), + vision_encoder=dict( + type='BeitModel3D', + config='microsoft/beit-base-patch16-224-pt22k-ft22k', + tem_config=dict( + num_frames=12, + temporal_model_block='timesformer', + temporal_model_position='last', + temporal_model_config=dict(input_dim=768), + use_temporal_position_embedding=True), + encoder_width=768, + add_ln=True), + text_encoder=dict( + type='XBertModel', + pretrained_model_name_or_path='bert-base-uncased', + encoder_width=768, + fusion_layer=9, + add_pooling_layer=False), + text_decoder=dict( + type='BertDecoder', + pretrained_model_name_or_path='bert-base-uncased', + encoder_width=768, + fusion_layer=0, + num_hidden_layers=3, + add_pooling_layer=True), + proj_dim=256, + temperature=0.07, + max_question_len=25, + max_answer_len=5, + num_ans_candidates=128, + gradient_checkpointing=True, + answer_list_path=answer_list_file) + +file_client_args = dict(io_backend='disk') + +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=12, + out_of_bound_opt='repeat_last'), + dict(type='DecordDecode'), + dict(type='RandomResizedCrop', area_range=(0.5, 
1.0)), + dict( + type='Resize', + scale=(224, 224), + keep_ratio=False, + interpolation='bicubic'), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict( + type='PackActionInputs', + algorithm_keys=( + 'question', + 'question_id', + 'gt_answer', + 'gt_answer_weight', + )) +] + +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=12, + test_mode=True, + out_of_bound_opt='repeat_last'), + dict(type='DecordDecode'), + dict( + type='Resize', + scale=(224, 224), + keep_ratio=False, + interpolation='bicubic'), + dict(type='FormatShape', input_format='NCHW'), + dict( + type='PackActionInputs', + algorithm_keys=( + 'question', + 'gt_answer', + 'question_id', + )) +] + +test_pipeline = val_pipeline + +dataset_type = 'MSRVTTVQA' + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=anno_file_train, + pipeline=train_pipeline, + data_prefix=dict(video=video_root), + )) + +val_dataloader = dict( + batch_size=16, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=anno_file_val, + pipeline=val_pipeline, + data_prefix=dict(video=video_root), + )) + +test_dataloader = dict( + batch_size=16, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=anno_file_test, + pipeline=test_pipeline, + data_prefix=dict(video=video_root), + )) + +val_evaluator = dict(type='VQAAcc') +test_evaluator = dict(type='VQAAcc') + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=10, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.01, + by_epoch=True, + begin=0, + end=1, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=10, + eta_min_ratio=0.01, + by_epoch=True, + begin=1, + end=10, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.02), + paramwise_cfg=dict( + bypass_duplicate=True, norm_decay_mult=0.0, bias_decay_mult=0.0), + clip_grad=dict(max_norm=50, norm_type=2), +) + +model_wrapper_cfg = dict(type='MMDistributedDataParallel', static_graph=True) + +default_hooks = dict( + logger=dict(type='LoggerHook', interval=20, ignore_last=False)) + +auto_scale_lr = dict(enable=True, base_batch_size=32) + +find_unused_parameters = True diff --git a/configs/multimodal/vindlu/vindlu_beit-base_vqa-mc_msrvtt-mc.py b/configs/multimodal/vindlu/vindlu_beit-base_vqa-mc_msrvtt-mc.py new file mode 100644 index 0000000000..7ec0271928 --- /dev/null +++ b/configs/multimodal/vindlu/vindlu_beit-base_vqa-mc_msrvtt-mc.py @@ -0,0 +1,80 @@ +_base_ = ['../../_base_/default_runtime.py'] + +video_root = 'data/msrvtt/videos_2fps_224' +anno_file_test = 'data/msrvtt/annotations/msrvtt_mc_test.json' + +# model settings +model = dict( + type='VindLURetrievalMC', + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[128], + std=[128], + format_shape='NCTHW'), + tokenizer=dict( + type='VindLUTokenizer', + pretrained_model_name_or_path='bert-base-uncased'), + vision_encoder=dict( + type='BeitModel3D', + config='microsoft/beit-base-patch16-224-pt22k-ft22k', + tem_config=dict( + 
num_frames=12, + temporal_model_block='timesformer', + temporal_model_position='last', + temporal_model_config=dict(input_dim=768), + use_temporal_position_embedding=True), + encoder_width=768, + add_ln=True), + text_encoder=dict( + type='XBertModel', + pretrained_model_name_or_path='bert-base-uncased', + encoder_width=768, + fusion_layer=9, + add_pooling_layer=False), + text_decoder=dict( + type='BertDecoder', + pretrained_model_name_or_path='bert-base-uncased', + encoder_width=768, + fusion_layer=0, + num_hidden_layers=3, + add_pooling_layer=True), + proj_dim=256, + temperature=0.07, + max_txt_len=32, + gradient_checkpointing=True) + +file_client_args = dict(io_backend='disk') + +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=12, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs', algorithm_keys=('caption_options', )) +] + +dataset_type = 'MSRVTTVQAMC' + +test_dataloader = dict( + batch_size=32, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=anno_file_test, + pipeline=test_pipeline, + data_prefix=dict(video=video_root), + )) + +test_evaluator = dict(type='VQAMCACC') +test_cfg = dict(type='TestLoop') + +default_hooks = dict( + logger=dict(type='LoggerHook', interval=20, ignore_last=False), ) diff --git a/mmaction/datasets/__init__.py b/mmaction/datasets/__init__.py index ded946b727..cc838f8f31 100644 --- a/mmaction/datasets/__init__.py +++ b/mmaction/datasets/__init__.py @@ -3,6 +3,7 @@ from .audio_dataset import AudioDataset from .ava_dataset import AVADataset, AVAKineticsDataset from .base import BaseActionDataset +from .msrvtt_datasets import MSRVTTVQA, MSRVTTVQAMC, MSRVTTRetrieval from .pose_dataset import PoseDataset from .rawframe_dataset import RawframeDataset from .repeat_aug_dataset import RepeatAugDataset, repeat_pseudo_collate @@ -13,5 +14,6 @@ __all__ = [ 'AVADataset', 'AVAKineticsDataset', 'ActivityNetDataset', 'AudioDataset', 'BaseActionDataset', 'PoseDataset', 'RawframeDataset', 'RepeatAugDataset', - 'VideoDataset', 'repeat_pseudo_collate', 'VideoTextDataset' + 'VideoDataset', 'repeat_pseudo_collate', 'VideoTextDataset', + 'MSRVTTRetrieval', 'MSRVTTVQA', 'MSRVTTVQAMC' ] diff --git a/mmaction/datasets/msrvtt_datasets.py b/mmaction/datasets/msrvtt_datasets.py new file mode 100644 index 0000000000..058734c01d --- /dev/null +++ b/mmaction/datasets/msrvtt_datasets.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
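The MSR-VTT dataset classes added below each parse a JSON annotation file. Judging from the fields accessed in their `load_data_list` methods, the expected entries look roughly like the following (file names and values are illustrative, not taken from the actual annotation files):

```python
# Illustrative MSR-VTT annotation entries, inferred from the fields read in
# load_data_list of the three dataset classes below.
msrvtt_qa_entry = {        # MSRVTTVQA
    'question_id': 0,
    'video': 'video7010.mp4',
    'question': 'what is the man doing',
    'answer': 'cooking',   # a single string or a list of annotator answers
}
msrvtt_mc_entry = {        # MSRVTTVQAMC
    'video': 'video7010.mp4',
    'answer': 2,           # index of the correct caption among the options
    'caption': ['option 0', 'option 1', 'option 2', 'option 3', 'option 4'],
}
msrvtt_ret_entry = {       # MSRVTTRetrieval
    'video': 'video7010.mp4',
    'caption': 'a man is cooking in a kitchen',  # a string or list of captions
}
```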
+import json +import os.path as osp +import re +from collections import Counter +from typing import Dict, List + +from mmengine.fileio import exists + +from mmaction.registry import DATASETS +from .base import BaseActionDataset + + +@DATASETS.register_module() +class MSRVTTVQA(BaseActionDataset): + """MSR-VTT Video Question Answering dataset.""" + + def load_data_list(self) -> List[Dict]: + """Load annotation file to get video information.""" + exists(self.ann_file) + data_list = [] + + with open(self.ann_file) as f: + data_lines = json.load(f) + for data in data_lines: + answers = data['answer'] + if isinstance(answers, str): + answers = [answers] + count = Counter(answers) + answer_weight = [i / len(answers) for i in count.values()] + data_item = dict( + question_id=data['question_id'], + filename=osp.join(self.data_prefix['video'], + data['video']), + question=pre_text(data['question']), + gt_answer=list(count.keys()), + gt_answer_weight=answer_weight) + data_list.append(data_item) + + return data_list + + +@DATASETS.register_module() +class MSRVTTVQAMC(BaseActionDataset): + """MSR-VTT VQA multiple choices dataset.""" + + def load_data_list(self) -> List[Dict]: + """Load annotation file to get video information.""" + exists(self.ann_file) + data_list = [] + + with open(self.ann_file) as f: + data_lines = json.load(f) + for data in data_lines: + data_item = dict( + filename=osp.join(self.data_prefix['video'], + data['video']), + label=data['answer'], + caption_options=[pre_text(c) for c in data['caption']]) + data_list.append(data_item) + + return data_list + + +@DATASETS.register_module() +class MSRVTTRetrieval(BaseActionDataset): + """MSR-VTT Retrieval dataset.""" + + def load_data_list(self) -> List[Dict]: + """Load annotation file to get video information.""" + exists(self.ann_file) + data_list = [] + + with open(self.ann_file) as f: + data_lines = json.load(f) + video_idx = 0 + text_idx = 0 + for data in data_lines: + # don't consider multiple videos or multiple captions + video_path = osp.join(self.data_prefix['video'], data['video']) + data_item = dict( + filename=video_path, + text=[], + gt_video_id=[], + gt_text_id=[]) + if isinstance(data['caption'], str): + data['caption'] = [data['caption']] + + for text in data['caption']: + text = pre_text(text) + data_item['text'].append(text) + data_item['gt_video_id'].append(video_idx) + data_item['gt_text_id'].append(text_idx) + text_idx += 1 + + video_idx += 1 + data_list.append(data_item) + self.num_videos = video_idx + self.num_texts = text_idx + + return data_list + + +def pre_text(text, max_l=None): + text = re.sub(r"([,.'!?\"()*#:;~])", '', text.lower()) + text = text.replace('-', ' ').replace('/', + ' ').replace('', 'person') + + text = re.sub(r'\s{2,}', ' ', text) + text = text.rstrip('\n').strip(' ') + + if max_l: # truncate + words = text.split(' ') + if len(words) > max_l: + text = ' '.join(words[:max_l]) + return text diff --git a/mmaction/datasets/transforms/formatting.py b/mmaction/datasets/transforms/formatting.py index fb67e10c0e..a8e9b9ab82 100644 --- a/mmaction/datasets/transforms/formatting.py +++ b/mmaction/datasets/transforms/formatting.py @@ -20,6 +20,8 @@ class PackActionInputs(BaseTransform): meta_keys (Sequence[str]): The meta keys to saved in the `metainfo` of the `data_sample`. Defaults to ``('img_shape', 'img_key', 'video_id', 'timestamp')``. + algorithm_keys (Sequence[str]): The keys of custom elements to be used + in the algorithm. Defaults to an empty tuple. 
""" mapping_table = { @@ -28,13 +30,15 @@ class PackActionInputs(BaseTransform): } def __init__( - self, - collect_keys: Optional[Tuple[str]] = None, - meta_keys: Sequence[str] = ('img_shape', 'img_key', 'video_id', - 'timestamp') + self, + collect_keys: Optional[Tuple[str]] = None, + meta_keys: Sequence[str] = ('img_shape', 'img_key', 'video_id', + 'timestamp'), + algorithm_keys: Sequence[str] = (), ) -> None: self.collect_keys = collect_keys self.meta_keys = meta_keys + self.algorithm_keys = algorithm_keys def transform(self, results: Dict) -> Dict: """The transform function of :class:`PackActionInputs`. @@ -88,6 +92,12 @@ def transform(self, results: Dict) -> Dict: if 'label' in results: data_sample.set_gt_label(results['label']) + # Set custom algorithm keys + for key in self.algorithm_keys: + if key in results: + data_sample.set_field(results[key], key) + + # Set meta keys img_meta = {k: results[k] for k in self.meta_keys if k in results} data_sample.set_metainfo(img_meta) packed_results['data_samples'] = data_sample diff --git a/mmaction/datasets/transforms/processing.py b/mmaction/datasets/transforms/processing.py index 3d432bd723..6d54c6bf24 100644 --- a/mmaction/datasets/transforms/processing.py +++ b/mmaction/datasets/transforms/processing.py @@ -613,8 +613,9 @@ class Resize(BaseTransform): keep_ratio (bool): If set to True, Images will be resized without changing the aspect ratio. Otherwise, it will resize images to a given size. Default: True. - interpolation (str): Algorithm used for interpolation: - "nearest" | "bilinear". Default: "bilinear". + interpolation (str): Algorithm used for interpolation, + accepted values are "nearest", "bilinear", "bicubic", "area", + "lanczos". Default: "bilinear". lazy (bool): Determine whether to apply lazy operation. Default: False. """ diff --git a/mmaction/engine/runner/__init__.py b/mmaction/engine/runner/__init__.py index c7dc511ea8..9bc36f001b 100644 --- a/mmaction/engine/runner/__init__.py +++ b/mmaction/engine/runner/__init__.py @@ -1,4 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. from .multi_loop import MultiLoaderEpochBasedTrainLoop +from .retrieval_loop import RetrievalTestLoop, RetrievalValLoop -__all__ = ['MultiLoaderEpochBasedTrainLoop'] +__all__ = [ + 'MultiLoaderEpochBasedTrainLoop', 'RetrievalValLoop', 'RetrievalTestLoop' +] diff --git a/mmaction/engine/runner/retrieval_loop.py b/mmaction/engine/runner/retrieval_loop.py new file mode 100644 index 0000000000..dc884876da --- /dev/null +++ b/mmaction/engine/runner/retrieval_loop.py @@ -0,0 +1,168 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import torch +from mmengine.model import is_model_wrapper +from mmengine.runner import TestLoop, ValLoop, autocast + +from mmaction.registry import LOOPS + + +@LOOPS.register_module() +class RetrievalValLoop(ValLoop): + """Loop for multimodal retrieval val. + + Args: + runner (Runner): A reference of runner. + dataloader (Dataloader or dict): A dataloader object or a dict to + build a dataloader. + evaluator (Evaluator or dict or list): Used for computing metrics. + fp16 (bool): Whether to enable fp16 valing. Defaults to + False. 
+ """ + + def run(self) -> dict: + """Launch val.""" + self.runner.call_hook('before_val') + self.runner.call_hook('before_val_epoch') + self.runner.model.eval() + + feats_local = [] + data_samples_local = [] + + for idx, data_batch in enumerate(self.dataloader): + with torch.no_grad(): + self.runner.call_hook( + 'before_val_iter', batch_idx=idx, data_batch=data_batch) + # predictions should be sequence of BaseDataElement + with autocast(enabled=self.fp16): + if is_model_wrapper(self.runner.model): + data_preprocessor = self.runner.model.module.data_preprocessor # noqa: E501 + else: + data_preprocessor = self.runner.model.data_preprocessor + + # get features for retrieval instead of data samples + data_batch = data_preprocessor(data_batch, False) + feats = self.runner.model._run_forward( + data_batch, mode='tensor') + feats_local.append(feats) + data_samples_local.extend(data_batch['data_samples']) + self.runner.call_hook( + 'after_val_iter', + batch_idx=idx, + data_batch=data_batch, + outputs=feats) + + # concatenate different features + feats_local = { + k: torch.cat([dic[k] for dic in feats_local]) + for k in feats_local[0] + } + + # get predictions + if is_model_wrapper(self.runner.model): + predict_all_fn = self.runner.model.module.predict_all + else: + predict_all_fn = self.runner.model.predict_all + + num_videos = self.dataloader.dataset.num_videos + num_texts = self.dataloader.dataset.num_texts + with torch.no_grad(): + with autocast(enabled=self.fp16): + i2t_data_samples, t2i_data_samples = predict_all_fn( + feats_local, + data_samples_local, + num_images=num_videos, + num_texts=num_texts, + ) + # process in evaluator and compute metrics + self.evaluator.process(i2t_data_samples, None) + i2t_metrics = self.evaluator.evaluate(num_videos) + i2t_metrics = {f'i2t/{k}': v for k, v in i2t_metrics.items()} + self.evaluator.process(t2i_data_samples, None) + t2i_metrics = self.evaluator.evaluate(num_texts) + t2i_metrics = {f't2i/{k}': v for k, v in t2i_metrics.items()} + metrics = {**i2t_metrics, **t2i_metrics} + self.runner.call_hook('after_val_epoch', metrics=metrics) + self.runner.call_hook('after_val') + return metrics + + +@LOOPS.register_module() +class RetrievalTestLoop(TestLoop): + """Loop for multimodal retrieval test. + + Args: + runner (Runner): A reference of runner. + dataloader (Dataloader or dict): A dataloader object or a dict to + build a dataloader. + evaluator (Evaluator or dict or list): Used for computing metrics. + fp16 (bool): Whether to enable fp16 testing. Defaults to + False. 
+ """ + + def run(self) -> dict: + """Launch test.""" + self.runner.call_hook('before_test') + self.runner.call_hook('before_test_epoch') + self.runner.model.eval() + + feats_local = [] + data_samples_local = [] + + for idx, data_batch in enumerate(self.dataloader): + with torch.no_grad(): + self.runner.call_hook( + 'before_test_iter', batch_idx=idx, data_batch=data_batch) + # predictions should be sequence of BaseDataElement + with autocast(enabled=self.fp16): + if is_model_wrapper(self.runner.model): + data_preprocessor = self.runner.model.module.data_preprocessor # noqa: E501 + else: + data_preprocessor = self.runner.model.data_preprocessor + # get features for retrieval instead of data samples + data_batch = data_preprocessor(data_batch, False) + feats = self.runner.model._run_forward( + data_batch, mode='tensor') + feats_local.append(feats) + data_samples_local.extend(data_batch['data_samples']) + self.runner.call_hook( + 'after_test_iter', + batch_idx=idx, + data_batch=data_batch, + outputs=feats) + + # concatenate different features + feats_local = { + k: torch.cat([dic[k] for dic in feats_local]) + for k in feats_local[0] + } + + # get predictions + if is_model_wrapper(self.runner.model): + predict_all_fn = self.runner.model.module.predict_all + else: + predict_all_fn = self.runner.model.predict_all + + num_videos = self.dataloader.dataset.num_videos + num_texts = self.dataloader.dataset.num_texts + with torch.no_grad(): + with autocast(enabled=self.fp16): + i2t_data_samples, t2i_data_samples = predict_all_fn( + feats_local, + data_samples_local, + num_images=num_videos, + num_texts=num_texts, + ) + + # process in evaluator and compute metrics + self.evaluator.process(i2t_data_samples, None) + i2t_metrics = self.evaluator.evaluate(num_videos) + i2t_metrics = {f'i2t/{k}': v for k, v in i2t_metrics.items()} + self.evaluator.process(t2i_data_samples, None) + t2i_metrics = self.evaluator.evaluate(num_texts) + t2i_metrics = {f't2i/{k}': v for k, v in t2i_metrics.items()} + metrics = {**i2t_metrics, **t2i_metrics} + + self.runner.call_hook('after_test_epoch', metrics=metrics) + self.runner.call_hook('after_test') + return metrics diff --git a/mmaction/evaluation/metrics/__init__.py b/mmaction/evaluation/metrics/__init__.py index 8bf22c6672..341ec577ce 100644 --- a/mmaction/evaluation/metrics/__init__.py +++ b/mmaction/evaluation/metrics/__init__.py @@ -2,10 +2,12 @@ from .acc_metric import AccMetric, ConfusionMatrix from .anet_metric import ANetMetric from .ava_metric import AVAMetric +from .multimodal_metric import VQAMCACC, ReportVQA, RetrievalRecall, VQAAcc from .multisports_metric import MultiSportsMetric from .retrieval_metric import RetrievalMetric __all__ = [ 'AccMetric', 'AVAMetric', 'ANetMetric', 'ConfusionMatrix', - 'MultiSportsMetric', 'RetrievalMetric' + 'MultiSportsMetric', 'RetrievalMetric', 'VQAAcc', 'ReportVQA', 'VQAMCACC', + 'RetrievalRecall' ] diff --git a/mmaction/evaluation/metrics/multimodal_metric.py b/mmaction/evaluation/metrics/multimodal_metric.py new file mode 100644 index 0000000000..2c144ac10a --- /dev/null +++ b/mmaction/evaluation/metrics/multimodal_metric.py @@ -0,0 +1,565 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# Copied from mmpretrain +# Partly adopted from https://github.com/GT-Vision-Lab/VQA +# Copyright (c) 2014, Aishwarya Agrawal +from typing import List, Optional, Sequence, Union + +import mmengine +import numpy as np +import torch +import torch.nn.functional as F +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger +from mmengine.utils import is_seq_of + +from mmaction.registry import METRICS +from mmaction.structures.action_data_sample import format_label +from .acc_metric import to_tensor + + +def _process_punctuation(inText): + import re + outText = inText + punct = [ + ';', r'/', '[', ']', '"', '{', '}', '(', ')', '=', '+', '\\', '_', '-', + '>', '<', '@', '`', ',', '?', '!' + ] + commaStrip = re.compile('(\d)(,)(\d)') # noqa: W605 + periodStrip = re.compile('(?!<=\d)(\.)(?!\d)') # noqa: W605 + for p in punct: + if (p + ' ' in inText or ' ' + p in inText) or (re.search( + commaStrip, inText) is not None): + outText = outText.replace(p, '') + else: + outText = outText.replace(p, ' ') + outText = periodStrip.sub('', outText, re.UNICODE) + return outText + + +def _process_digit_article(inText): + outText = [] + tempText = inText.lower().split() + articles = ['a', 'an', 'the'] + manualMap = { + 'none': '0', + 'zero': '0', + 'one': '1', + 'two': '2', + 'three': '3', + 'four': '4', + 'five': '5', + 'six': '6', + 'seven': '7', + 'eight': '8', + 'nine': '9', + 'ten': '10', + } + contractions = { + 'aint': "ain't", + 'arent': "aren't", + 'cant': "can't", + 'couldve': "could've", + 'couldnt': "couldn't", + "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", + 'didnt': "didn't", + 'doesnt': "doesn't", + 'dont': "don't", + 'hadnt': "hadn't", + "hadnt've": "hadn't've", + "hadn'tve": "hadn't've", + 'hasnt': "hasn't", + 'havent': "haven't", + 'hed': "he'd", + "hed've": "he'd've", + "he'dve": "he'd've", + 'hes': "he's", + 'howd': "how'd", + 'howll': "how'll", + 'hows': "how's", + "Id've": "I'd've", + "I'dve": "I'd've", + 'Im': "I'm", + 'Ive': "I've", + 'isnt': "isn't", + 'itd': "it'd", + "itd've": "it'd've", + "it'dve": "it'd've", + 'itll': "it'll", + "let's": "let's", + 'maam': "ma'am", + 'mightnt': "mightn't", + "mightnt've": "mightn't've", + "mightn'tve": "mightn't've", + 'mightve': "might've", + 'mustnt': "mustn't", + 'mustve': "must've", + 'neednt': "needn't", + 'notve': "not've", + 'oclock': "o'clock", + 'oughtnt': "oughtn't", + "ow's'at": "'ow's'at", + "'ows'at": "'ow's'at", + "'ow'sat": "'ow's'at", + 'shant': "shan't", + "shed've": "she'd've", + "she'dve": "she'd've", + "she's": "she's", + 'shouldve': "should've", + 'shouldnt': "shouldn't", + "shouldnt've": "shouldn't've", + "shouldn'tve": "shouldn't've", + "somebody'd": 'somebodyd', + "somebodyd've": "somebody'd've", + "somebody'dve": "somebody'd've", + 'somebodyll': "somebody'll", + 'somebodys': "somebody's", + 'someoned': "someone'd", + "someoned've": "someone'd've", + "someone'dve": "someone'd've", + 'someonell': "someone'll", + 'someones': "someone's", + 'somethingd': "something'd", + "somethingd've": "something'd've", + "something'dve": "something'd've", + 'somethingll': "something'll", + 'thats': "that's", + 'thered': "there'd", + "thered've": "there'd've", + "there'dve": "there'd've", + 'therere': "there're", + 'theres': "there's", + 'theyd': "they'd", + "theyd've": "they'd've", + "they'dve": "they'd've", + 'theyll': "they'll", + 'theyre': "they're", + 'theyve': "they've", + 'twas': "'twas", + 'wasnt': "wasn't", + "wed've": "we'd've", + "we'dve": "we'd've", + 'weve': "we've", + 'werent': "weren't", 
+ 'whatll': "what'll", + 'whatre': "what're", + 'whats': "what's", + 'whatve': "what've", + 'whens': "when's", + 'whered': "where'd", + 'wheres': "where's", + 'whereve': "where've", + 'whod': "who'd", + "whod've": "who'd've", + "who'dve": "who'd've", + 'wholl': "who'll", + 'whos': "who's", + 'whove': "who've", + 'whyll': "why'll", + 'whyre': "why're", + 'whys': "why's", + 'wont': "won't", + 'wouldve': "would've", + 'wouldnt': "wouldn't", + "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", + 'yall': "y'all", + "yall'll": "y'all'll", + "y'allll": "y'all'll", + "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", + "y'all'dve": "y'all'd've", + 'youd': "you'd", + "youd've": "you'd've", + "you'dve": "you'd've", + 'youll': "you'll", + 'youre': "you're", + 'youve': "you've", + } + for word in tempText: + word = manualMap.setdefault(word, word) + if word not in articles: + outText.append(word) + for wordId, word in enumerate(outText): + if word in contractions: + outText[wordId] = contractions[word] + outText = ' '.join(outText) + return outText + + +@METRICS.register_module() +class VQAAcc(BaseMetric): + '''VQA Acc metric. + Args: + + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + ''' + default_prefix = 'VQA' + + def __init__(self, + full_score_weight: float = 0.3, + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super().__init__(collect_device=collect_device, prefix=prefix) + self.full_score_weight = full_score_weight + + def process(self, data_batch, data_samples): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for sample in data_samples: + gt_answer = sample.get('gt_answer') + gt_answer_weight = sample.get('gt_answer_weight') + if isinstance(gt_answer, str): + gt_answer = [gt_answer] + if gt_answer_weight is None: + gt_answer_weight = [1. / (len(gt_answer))] * len(gt_answer) + + result = { + 'pred_answer': sample.get('pred_answer'), + 'gt_answer': gt_answer, + 'gt_answer_weight': gt_answer_weight, + } + + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (dict): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. 
+ """ + acc = [] + for result in results: + pred_answer = self._process_answer(result['pred_answer']) + gt_answer = [ + self._process_answer(answer) for answer in result['gt_answer'] + ] + answer_weight = result['gt_answer_weight'] + + weight_sum = 0 + for i, gt in enumerate(gt_answer): + if gt == pred_answer: + weight_sum += answer_weight[i] + vqa_acc = min(1.0, weight_sum / self.full_score_weight) + acc.append(vqa_acc) + + accuracy = sum(acc) / len(acc) * 100 + + metrics = {'acc': accuracy} + return metrics + + def _process_answer(self, answer): + answer = answer.replace('\n', ' ') + answer = answer.replace('\t', ' ') + answer = answer.strip() + answer = _process_punctuation(answer) + answer = _process_digit_article(answer) + return answer + + +@METRICS.register_module() +class ReportVQA(BaseMetric): + """Dump VQA result to the standard json format for VQA evaluation. + + Args: + file_path (str): The file path to save the result file. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + """ + default_prefix = 'VQA' + + def __init__(self, + file_path: str, + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super().__init__(collect_device=collect_device, prefix=prefix) + if not file_path.endswith('.json'): + raise ValueError('The output file must be a json file.') + self.file_path = file_path + + def process(self, data_batch, data_samples) -> None: + """transfer tensors in predictions to CPU.""" + for sample in data_samples: + question_id = sample['question_id'] + pred_answer = sample['pred_answer'] + + result = { + 'question_id': int(question_id), + 'answer': pred_answer, + } + + self.results.append(result) + + def compute_metrics(self, results: List): + """Dump the result to json file.""" + mmengine.dump(results, self.file_path) + logger = MMLogger.get_current_instance() + logger.info(f'Results has been saved to {self.file_path}.') + return {} + + +@METRICS.register_module() +class VQAMCACC(BaseMetric): + '''VQA multiple choice Acc metric. + Args: + + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + ''' + default_prefix = 'VQAMC' + + def __init__(self, + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super().__init__(collect_device=collect_device, prefix=prefix) + + def process(self, data_batch, data_samples): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. 
+ """ + for sample in data_samples: + # gt_labels in datasample is a LabelData + label = sample['gt_label'].item() + result = { + 'pred_label': sample.get('pred_label'), + 'gt_label': label, + } + + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (dict): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + preds = np.array([x['pred_label'] for x in results]) + labels = np.array([x['gt_label'] for x in results]) + + accuracy = np.sum(preds == labels) / len(preds) * 100 + + metrics = {'acc': accuracy} + return metrics + + +@METRICS.register_module() +class RetrievalRecall(BaseMetric): + r"""Recall evaluation metric for image retrieval. + + Args: + topk (int | Sequence[int]): If the ground truth label matches one of + the best **k** predictions, the sample will be regard as a positive + prediction. If the parameter is a tuple, all of top-k recall will + be calculated and outputted together. Defaults to 1. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. + + """ + default_prefix: Optional[str] = 'retrieval' + + def __init__(self, + topk: Union[int, Sequence[int]], + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + topk = (topk, ) if isinstance(topk, int) else topk + + for k in topk: + if k <= 0: + raise ValueError('`topk` must be a ingter larger than 0 ' + 'or seq of ingter larger than 0.') + + self.topk = topk + super().__init__(collect_device=collect_device, prefix=prefix) + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]): + """Process one batch of data and predictions. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data from the dataloader. + predictions (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + pred_score = data_sample['pred_score'].cpu() + gt_label = format_label(data_sample['gt_label']) + + if 'gt_score' in data_sample: + target = data_sample.get('gt_score').clone() + else: + num_classes = pred_score.size()[-1] + target = F.one_hot(gt_label, num_classes) + + # Because the retrieval output logit vector will be much larger + # compared to the normal classification, to save resources, the + # evaluation results are computed each batch here and then reduce + # all results at the end. + result = RetrievalRecall.calculate( + pred_score.unsqueeze(0), target.unsqueeze(0), topk=self.topk) + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. 
+ """ + result_metrics = dict() + for i, k in enumerate(self.topk): + recall_at_k = sum([r[i].item() for r in results]) / len(results) + result_metrics[f'Recall@{k}'] = recall_at_k + + return result_metrics + + @staticmethod + def calculate(pred: Union[np.ndarray, torch.Tensor], + target: Union[np.ndarray, torch.Tensor], + topk: Union[int, Sequence[int]], + pred_indices: (bool) = False, + target_indices: (bool) = False) -> float: + """Calculate the average recall. + + Args: + pred (torch.Tensor | np.ndarray | Sequence): The prediction + results. A :obj:`torch.Tensor` or :obj:`np.ndarray` with + shape ``(N, M)`` or a sequence of index/onehot + format labels. + target (torch.Tensor | np.ndarray | Sequence): The prediction + results. A :obj:`torch.Tensor` or :obj:`np.ndarray` with + shape ``(N, M)`` or a sequence of index/onehot + format labels. + topk (int, Sequence[int]): Predictions with the k-th highest + scores are considered as positive. + pred_indices (bool): Whether the ``pred`` is a sequence of + category index labels. Defaults to False. + target_indices (bool): Whether the ``target`` is a sequence of + category index labels. Defaults to False. + + Returns: + List[float]: the average recalls. + """ + topk = (topk, ) if isinstance(topk, int) else topk + for k in topk: + if k <= 0: + raise ValueError('`topk` must be a ingter larger than 0 ' + 'or seq of ingter larger than 0.') + + max_keep = max(topk) + pred = _format_pred(pred, max_keep, pred_indices) + target = _format_target(target, target_indices) + + assert len(pred) == len(target), ( + f'Length of `pred`({len(pred)}) and `target` ({len(target)}) ' + f'must be the same.') + + num_samples = len(pred) + results = [] + for k in topk: + recalls = torch.zeros(num_samples) + for i, (sample_pred, + sample_target) in enumerate(zip(pred, target)): + sample_pred = np.array(to_tensor(sample_pred).cpu()) + sample_target = np.array(to_tensor(sample_target).cpu()) + recalls[i] = int(np.in1d(sample_pred[:k], sample_target).max()) + results.append(recalls.mean() * 100) + return results + + +def _format_pred(label, topk=None, is_indices=False): + """format various label to List[indices].""" + if is_indices: + assert isinstance(label, Sequence), \ + '`pred` must be Sequence of indices when' \ + f' `pred_indices` set to True, but get {type(label)}' + for i, sample_pred in enumerate(label): + assert is_seq_of(sample_pred, int) or isinstance( + sample_pred, (np.ndarray, torch.Tensor)), \ + '`pred` should be Sequence of indices when `pred_indices`' \ + f'set to True. but pred[{i}] is {sample_pred}' + if topk: + label[i] = sample_pred[:min(topk, len(sample_pred))] + return label + if isinstance(label, np.ndarray): + label = torch.from_numpy(label) + elif not isinstance(label, torch.Tensor): + raise TypeError(f'The pred must be type of torch.tensor, ' + f'np.ndarray or Sequence but get {type(label)}.') + topk = topk if topk else label.size()[-1] + _, indices = label.topk(topk) + return indices + + +def _format_target(label, is_indices=False): + """format various label to List[indices].""" + if is_indices: + assert isinstance(label, Sequence), \ + '`target` must be Sequence of indices when' \ + f' `target_indices` set to True, but get {type(label)}' + for i, sample_gt in enumerate(label): + assert is_seq_of(sample_gt, int) or isinstance( + sample_gt, (np.ndarray, torch.Tensor)), \ + '`target` should be Sequence of indices when ' \ + f'`target_indices` set to True. 
but target[{i}] is {sample_gt}' + return label + + if isinstance(label, np.ndarray): + label = torch.from_numpy(label) + elif isinstance(label, Sequence) and not mmengine.is_str(label): + label = torch.tensor(label) + elif not isinstance(label, torch.Tensor): + raise TypeError(f'The pred must be type of torch.tensor, ' + f'np.ndarray or Sequence but get {type(label)}.') + + indices = [sample_gt.nonzero().squeeze(-1) for sample_gt in label] + return indices diff --git a/mmaction/models/__init__.py b/mmaction/models/__init__.py index 6c53b29254..08f7d41f52 100644 --- a/mmaction/models/__init__.py +++ b/mmaction/models/__init__.py @@ -5,6 +5,7 @@ from .heads import * # noqa: F401,F403 from .localizers import * # noqa: F401,F403 from .losses import * # noqa: F401,F403 +from .multimodal import * # noqa: F401,F403 from .necks import * # noqa: F401,F403 from .recognizers import * # noqa: F401,F403 from .roi_heads import * # noqa: F401,F403 diff --git a/mmaction/models/multimodal/__init__.py b/mmaction/models/multimodal/__init__.py new file mode 100644 index 0000000000..9a5f2a99df --- /dev/null +++ b/mmaction/models/multimodal/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmaction.utils.dependency import WITH_MULTIMODAL + +if WITH_MULTIMODAL: + from .vindlu import * # noqa: F401,F403 + +else: + from mmaction.registry import MODELS + from mmaction.utils.dependency import register_multimodal_placeholder + + register_multimodal_placeholder( + ['VindLUVQA', 'VindLURetrievalMC', 'VindLURetrieval'], MODELS) diff --git a/mmaction/models/multimodal/vindlu/__init__.py b/mmaction/models/multimodal/vindlu/__init__.py new file mode 100644 index 0000000000..e17c193246 --- /dev/null +++ b/mmaction/models/multimodal/vindlu/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .beit3d import BeitModel3D +from .tokenizer import VindLUTokenizer +from .vindlu_ret import VindLURetrieval +from .vindlu_ret_mc import VindLURetrievalMC +from .vindlu_vqa import VindLUVQA +from .xbert import BertDecoder, BertModel + +__all__ = [ + 'VindLUVQA', 'VindLURetrievalMC', 'VindLURetrieval', 'VindLUTokenizer', + 'BeitModel3D', 'BertDecoder', 'BertModel' +] diff --git a/mmaction/models/multimodal/vindlu/beit3d.py b/mmaction/models/multimodal/vindlu/beit3d.py new file mode 100644 index 0000000000..8e0d6f2fc3 --- /dev/null +++ b/mmaction/models/multimodal/vindlu/beit3d.py @@ -0,0 +1,350 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
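The new `mmaction/models/multimodal/__init__.py` above imports the VindLU models only when the optional multimodal dependencies are available (`WITH_MULTIMODAL`); otherwise it registers placeholders so configs naming `VindLUVQA` and friends still parse and only fail, with a readable message, at instantiation time. A rough sketch of that pattern, assuming an mmengine-style registry; this is not the actual `register_multimodal_placeholder` implementation:

```python
def register_placeholders(names, registry):
    """Illustrative stand-in for register_multimodal_placeholder."""
    for name in names:
        def _make(missing_name):
            class _Placeholder:
                def __init__(self, *args, **kwargs):
                    raise ImportError(
                        f'{missing_name} requires the optional multimodal '
                        'dependencies (e.g. transformers); install them '
                        'before building this model.')
            _Placeholder.__name__ = missing_name
            return _Placeholder
        # mmengine Registry: register a class object under the given name.
        registry.register_module(name=name, module=_make(name))
```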
+import importlib +from typing import Dict, Optional, Tuple, Union + +import einops +import torch +import torch.nn as nn +import torch.nn.functional as F +from transformers.models.beit import BeitConfig, BeitModel +from transformers.models.beit.modeling_beit import BeitAttention, BeitDropPath +from transformers.models.beit.modeling_beit import \ + BeitEmbeddings as BeitEmbeddings2D +from transformers.models.beit.modeling_beit import BeitLayer as BeitLayer2D +from transformers.models.beit.modeling_beit import BeitRelativePositionBias +from transformers.models.beit.modeling_beit import \ + BeitRelativePositionBias as BeitRelativePositionBias2D + +from mmaction.registry import MODELS +from .temporal_model import (X_CLIP, STAdapter, TemporalAttention, + WindowTemporalAttention) + + +def interpolate_temporal_pos_embed(temp_embed_old, num_frames_new): + """ + temp_embed_old: (1, num_frames_old, 1, d) + Returns: + temp_embed_new: (1, num_frames_new, 1, d) + """ + temp_embed_old = temp_embed_old.squeeze(2).permute( + 0, 2, 1) # (1, d, num_frames_old) + temp_embed_new = F.interpolate( + temp_embed_old, num_frames_new, + mode='linear') # (1, d, num_frames_new) + temp_embed_new = temp_embed_new.permute(0, 2, 1).unsqueeze( + 2) # (1, num_frames_new, 1, d) + return temp_embed_new + + +class TemporalAttentionBeit(nn.Module): + """temporal attention using BeitAttention.""" + + def __init__(self, config: BeitConfig): + """TODO: to be defined.""" + super().__init__() + + self.layernorm_before = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.attention = BeitAttention(config, window_size=None) + self.scale = nn.Parameter( + config.temporal_model_init_value * torch.ones( + (config.hidden_size)), + requires_grad=True, + ) + self.drop_path = BeitDropPath(config.drop_path_rate) + + def forward(self, hidden_states: torch.Tensor): + """forward function. + + Args: + hidden_states (torch.Tensor): The input. 
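`interpolate_temporal_pos_embed` above lets a checkpoint trained with one clip length be reused at another: the `(1, T_old, 1, C)` table is treated as a 1-D signal along time and linearly resized to `T_new`, which is what `BeitEmbeddings3D.forward` falls back to further down when the input has more frames than the stored table. A quick shape check with made-up sizes:

```python
import torch

temp_embed_old = torch.randn(1, 8, 1, 768)   # table trained with 8 frames
temp_embed_new = interpolate_temporal_pos_embed(temp_embed_old, 16)
assert temp_embed_new.shape == (1, 16, 1, 768)
```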
Shape: [b,t,l,c] + + Returns: TODO + """ + b = hidden_states.shape[0] + output = einops.rearrange(hidden_states, 'b t l c -> (b l) t c') + output = self.layernorm_before(output) + output = self.attention(output) + output = einops.rearrange(output[0], '(b l) t c -> b t l c', b=b) + return hidden_states + self.drop_path(output[0]) * self.scale + + +class BeitPooler3D(nn.Module): + + def __init__(self, config: BeitConfig) -> None: + super().__init__() + self.num_prompts = config.add_k_prompts + self.layernorm = ( + nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + if config.use_mean_pooling else None) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + """ + Args: + hidden_states (torch.Tensor): Shape: [B,T,L,C] + """ + if self.layernorm is not None: + # Mean pool the final hidden states of the patch tokens + # patch_tokens = hidden_states[:, 1 + self.num_prompts :, :] + if self.num_prompts > 0: + patch_tokens = hidden_states[:, :, 1:-self.num_prompts, :] + else: + patch_tokens = hidden_states[:, :, 1:, :] + pooled_output = self.layernorm(patch_tokens.mean(2)) + else: + # Pool by simply taking the final hidden state of the [CLS] token + pooled_output = hidden_states[:, :, 0] + + return pooled_output + + +class BeitRelativePositionBias3D(BeitRelativePositionBias2D): + + def __init__(self, config: BeitConfig, window_size: tuple) -> None: + super().__init__(config, window_size) + + # add bias for prompts + self.k = config.add_k_prompts + if self.k > 0: + self.prompt_bias_table = nn.parameter.Parameter( + torch.zeros((2 + self.k) * self.k, config.num_attention_heads) + ) # k prompt-to-token, k token-to-prompt, k*k prompt-to-promt + else: + self.prompt_bias_table = None + + def forward(self) -> torch.Tensor: + # relative position bias 2d + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, + -1, + ) # Wh*Ww,Wh*Ww,nH + + # add bias for prompts + k = self.k + if k > 0: + l = self.window_size[0] * self.window_size[1] + 1 # noqa: E741 + bias = torch.zeros(l + k, l + k, + relative_position_bias.shape[-1]).to( + relative_position_bias.device) + bias[:l, :l] = relative_position_bias + bias[l:, :l] = self.prompt_bias_table[:k].view( + k, 1, -1) # prompt to token + bias[:l, + l:] = self.prompt_bias_table[k:2 * + k].view(1, k, + -1) # token to prompt + bias[l:, l:] = self.prompt_bias_table[2 * k, :].view( + k, k, -1) # prompt to prompt + else: + bias = relative_position_bias + + return bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class BeitEmbeddings3D(BeitEmbeddings2D): + """Construct the CLS token, position and patch embeddings. + + Optionally, also the mask token. + """ + + def __init__(self, config: BeitConfig) -> None: + super().__init__(config) + + if config.use_temporal_position_embedding: + self.temporal_position_embeddings = nn.parameter.Parameter( + torch.zeros(1, config.num_frames, 1, config.hidden_size)) + else: + self.temporal_position_embeddings = None + + if config.add_k_prompts > 0: + self.prompt_tokens = nn.parameter.Parameter( + torch.zeros(1, config.add_k_prompts, config.hidden_size)) + else: + self.prompt_tokens = None + + def forward(self, + pixel_values: torch.Tensor, + bool_masked_pos: Optional[torch.BoolTensor] = None + ) -> torch.Tensor: + """ + Args: + pixel_values (torch.Tensor): The input image patches. + Shape: [B, T, C, H, W]. 
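`BeitPooler3D` above mean-pools only the patch tokens of each frame: the leading `[CLS]` token and any trailing prompt tokens are sliced off, and pooling over the token axis leaves one feature per frame. The slicing spelled out on hypothetical shapes (layer norm omitted):

```python
import torch

B, T, L, C = 2, 4, 1 + 196 + 1, 768   # [CLS] + 14x14 patches + 1 prompt token
hidden_states = torch.randn(B, T, L, C)
num_prompts = 1

patch_tokens = (hidden_states[:, :, 1:-num_prompts, :]
                if num_prompts > 0 else hidden_states[:, :, 1:, :])
pooled = patch_tokens.mean(2)         # (B, T, C): one pooled vector per frame
assert pooled.shape == (B, T, C)
```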
+ + + """ + t = pixel_values.shape[1] + pixel_values = einops.rearrange(pixel_values, + 'b t c h w -> (b t) c h w') + + embeddings = self.patch_embeddings(pixel_values) + batch_size, seq_len, _ = embeddings.size() # [(b t) l c] + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) + if bool_masked_pos is not None: + mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) + # replace the masked visual tokens by mask_tokens + w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) + embeddings = embeddings * (1 - w) + mask_tokens * w + + if self.prompt_tokens is not None: + prompt_tokens = self.prompt_tokens.expand(batch_size, -1, -1) + embeddings = torch.cat((cls_tokens, embeddings, prompt_tokens), + dim=1) + else: + embeddings = torch.cat((cls_tokens, embeddings), + dim=1) # [B*T, L, C] + if self.position_embeddings is not None: + embeddings = embeddings + self.position_embeddings + + embeddings = einops.rearrange(embeddings, '(b t) l c -> b t l c', t=t) + if self.temporal_position_embeddings is not None: + if t <= self.temporal_position_embeddings.shape[1]: + embeddings = embeddings + \ + self.temporal_position_embeddings[:, :t] + else: + tpe = interpolate_temporal_pos_embed( + self.temporal_position_embeddings, t) + embeddings = embeddings + tpe + + embeddings = self.dropout(embeddings) + + return embeddings + + +class BeitLayer3D(BeitLayer2D): + + def __init__(self, + config: BeitConfig, + window_size: Optional[tuple] = None, + drop_path_rate: float = 0.0) -> None: + super().__init__(config, window_size, drop_path_rate) + + self.temporal_model_position = config.temporal_model_position + if config.temporal_model_block == 'st_adapter': + self.temp_model = STAdapter(**config.temporal_model_config) + elif config.temporal_model_block == 'timesformer': + self.temp_model = TemporalAttention(**config.temporal_model_config) + elif config.temporal_model_block == 'ta_beit': + self.temp_model = TemporalAttentionBeit(config) + elif config.temporal_model_block == 'window_attention': + self.temp_model = WindowTemporalAttention( + **config.temporal_model_config) + elif config.temporal_model_block == 'xclip': + self.temp_model = X_CLIP(**config.temporal_model_config) + elif config.temporal_model_block == 'none': + self.temp_model = None + else: + raise ValueError( + f'not accepted temporal model: {config.temporal_model_block}') + + self.temporal_model_block = config.temporal_model_block + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + relative_position_bias: Optional['BeitRelativePositionBias'] = None, + ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]: + + b, t, l, c = hidden_states.shape + + if self.temporal_model_block == 'xclip': + assert (self.temporal_model_position == 'first' + and self.config.add_k_prompts + == 1), ('xclip must be put before the attention and' + 'add_k_prompts must be 1.') + + if self.temp_model is not None and \ + self.temporal_model_position == 'first': + hidden_states = self.temp_model(hidden_states) + + hidden_states = einops.rearrange(hidden_states, 'b t l c -> (b t) l c') + + self_attention_outputs = self.attention( + self.layernorm_before( + hidden_states + ), # in BEiT, layernorm is applied before self-attention + head_mask, + output_attentions=output_attentions, + relative_position_bias=relative_position_bias, + ) + attention_output = self_attention_outputs[0] + + # add self attentions if we output attention weights + outputs = self_attention_outputs[1:] + 
+ # apply lambda_1 if present + if self.lambda_1 is not None: + attention_output = self.lambda_1 * attention_output + + # first residual connection + hidden_states = self.drop_path(attention_output) + hidden_states + + # in BEiT, layernorm is also applied after self-attention + layer_output = self.layernorm_after(hidden_states) + + layer_output = self.intermediate(layer_output) + layer_output = self.output(layer_output) + + if self.lambda_2 is not None: + layer_output = self.lambda_2 * layer_output + + # second residual connection + layer_output = self.drop_path(layer_output) + hidden_states + + layer_output = einops.rearrange( + layer_output, '(b t) l c -> b t l c', b=b) + + # apply temporal modeling block + if self.temp_model is not None and \ + self.temporal_model_position == 'last': + layer_output = self.temp_model(layer_output) + + outputs = (layer_output, ) + outputs + + return outputs + + +class BeitConfig3D(BeitConfig): + + def __init__(self, + num_frames=1, + temporal_model_block='none', + temporal_model_position='last', + temporal_model_init_value=0.0, + temporal_model_config={}, + use_temporal_position_embedding=False, + add_k_prompts=0, + **kwargs) -> None: + + super().__init__(**kwargs) + self.temporal_model_block = temporal_model_block + self.temporal_model_config = temporal_model_config + self.temporal_model_position = temporal_model_position + self.temporal_model_init_value = temporal_model_init_value + self.use_temporal_position_embedding = use_temporal_position_embedding + self.add_k_prompts = add_k_prompts + self.num_frames = num_frames + + +@MODELS.register_module() +class BeitModel3D(BeitModel): + + def __init__(self, + config: BeitConfig, + tem_config: Dict, + add_pooling_layer: bool = True) -> None: + # hack to replace original 2D modules with 3D modules + beit_package = importlib.import_module( + 'transformers.models.beit.modeling_beit') + beit_package.BeitEmbeddings = BeitEmbeddings3D + beit_package.BeitPooler = BeitPooler3D + beit_package.BeitLayer = BeitLayer3D + beit_package.BeitRelativePositionBias = BeitRelativePositionBias3D + + config = BeitConfig3D.from_pretrained(config, **tem_config) + super().__init__(config, add_pooling_layer) diff --git a/mmaction/models/multimodal/vindlu/modeling_bert.py b/mmaction/models/multimodal/vindlu/modeling_bert.py new file mode 100644 index 0000000000..5ffba79bdc --- /dev/null +++ b/mmaction/models/multimodal/vindlu/modeling_bert.py @@ -0,0 +1,1740 @@ +# flake8: noqa +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
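`BeitConfig3D` and `BeitModel3D` above extend the stock `transformers` BEiT: the config adds the temporal fields, and the model monkey-patches the 2-D embedding/layer/pooler/bias classes in `transformers.models.beit.modeling_beit` before calling the parent constructor, so pretrained 2-D weights load into the 3-D modules. A hedged construction sketch with illustrative values (not the mmaction defaults):

```python
# Assumes `transformers` is installed; all classes are the ones defined above.
config = BeitConfig3D(
    image_size=224,
    patch_size=16,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    num_frames=8,
    temporal_model_block='ta_beit',      # -> TemporalAttentionBeit in BeitLayer3D
    temporal_model_position='last',
    use_temporal_position_embedding=True,
    add_k_prompts=0,
)
layer = BeitLayer3D(config)              # one video transformer block
```

`BeitModel3D` itself is registered in `MODELS`, takes a pretrained BEiT name or path as `config` plus a dict of these temporal fields as `tem_config`, and builds the full backbone the same way via `BeitConfig3D.from_pretrained`.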
+"""PyTorch BERT model.""" + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +import transformers +from mmengine.logging import MMLogger +from torch import Tensor, device, dtype, nn +from torch.nn import CrossEntropyLoss, MSELoss +from transformers.activations import ACT2FN +# from transformers.models.bert.configuration_bert import BertConfig +from transformers.configuration_utils import PretrainedConfig +from transformers.file_utils import (ModelOutput, add_start_docstrings, + add_start_docstrings_to_model_forward, + replace_return_docstrings) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, MaskedLMOutput, + MultipleChoiceModelOutput, NextSentencePredictorOutput, + QuestionAnsweringModelOutput, SequenceClassifierOutput, + TokenClassifierOutput) +from transformers.modeling_utils import (PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer) + +transformers.logging.set_verbosity_error() + +_CONFIG_FOR_DOC = 'BertConfig' +_TOKENIZER_FOR_DOC = 'BertTokenizer' + +BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + 'bert-base-uncased', + 'bert-large-uncased', + 'bert-base-cased', + 'bert-large-cased', + 'bert-base-multilingual-uncased', + 'bert-base-multilingual-cased', + 'bert-base-chinese', + 'bert-base-german-cased', + 'bert-large-uncased-whole-word-masking', + 'bert-large-cased-whole-word-masking', + 'bert-large-uncased-whole-word-masking-finetuned-squad', + 'bert-large-cased-whole-word-masking-finetuned-squad', + 'bert-base-cased-finetuned-mrpc', + 'bert-base-german-dbmdz-cased', + 'bert-base-german-dbmdz-uncased', + 'cl-tohoku/bert-base-japanese', + 'cl-tohoku/bert-base-japanese-whole-word-masking', + 'cl-tohoku/bert-base-japanese-char', + 'cl-tohoku/bert-base-japanese-char-whole-word-masking', + 'TurkuNLP/bert-base-finnish-cased-v1', + 'TurkuNLP/bert-base-finnish-uncased-v1', + 'wietsedv/bert-base-dutch-cased', + # See all BERT models at https://huggingface.co/models?filter=bert +] + + +class BertConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to + instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the BERT + [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. 
+ intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. 
+ + Examples: + + ```python + >>> from transformers import BertModel, BertConfig + + >>> # Initializing a BERT bert-base-uncased style configuration + >>> configuration = BertConfig() + + >>> # Initializing a model from the bert-base-uncased style configuration + >>> model = BertModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = 'bert' + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act='gelu', + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=0, + position_embedding_type='absolute', + use_cache=True, + classifier_dropout=None, + cross_module='ca', + encoder_width=768, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + self.cross_module = cross_module + self.encoder_width = encoder_width + + +def load_tf_weights_in_bert(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + logger = MMLogger.get_current_instance() + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + 'Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see ' + 'https://www.tensorflow.org/install/ for installation instructions.' 
+ ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info('Converting TensorFlow checkpoint from {}'.format(tf_path)) + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info('Loading TF weight {} with shape {}'.format(name, shape)) + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split('/') + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any(n in [ + 'adam_v', + 'adam_m', + 'AdamWeightDecayOptimizer', + 'AdamWeightDecayOptimizer_1', + 'global_step', + ] for n in name): + logger.info('Skipping {}'.format('/'.join(name))) + continue + pointer = model + for m_name in name: + if re.fullmatch(r'[A-Za-z]+_\d+', m_name): + scope_names = re.split(r'_(\d+)', m_name) + else: + scope_names = [m_name] + if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': + pointer = getattr(pointer, 'weight') + elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': + pointer = getattr(pointer, 'bias') + elif scope_names[0] == 'output_weights': + pointer = getattr(pointer, 'weight') + elif scope_names[0] == 'squad': + pointer = getattr(pointer, 'classifier') + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info('Skipping {}'.format('/'.join(name))) + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == '_embeddings': + pointer = getattr(pointer, 'weight') + elif m_name == 'kernel': + array = np.transpose(array) + try: + assert ( + pointer.shape == array.shape + ), f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + + logger.info('Initialize PyTorch weight {}'.format(name)) + pointer.data = torch.from_numpy(array) + return model + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type + embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, + config.hidden_size, + padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, + config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, + config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + 'position_ids', + torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + + self.config = config + + def forward( + self, + input_ids=None, + token_type_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length: + seq_length + + past_key_values_length] + + 
if token_type_ids is None: + token_type_ids = torch.zeros( + input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == 'absolute': + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr( + config, 'embedding_size'): + raise ValueError( + 'The hidden size (%d) is not a multiple of the number of attention ' + 'heads (%d)' % + (config.hidden_size, config.num_attention_heads)) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / + config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + if (self.position_embedding_type == 'relative_key' + or self.position_embedding_type == 'relative_key_query'): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding( + 2 * config.max_position_embeddings - 1, + self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, + self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores( + self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores( + self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, + key_layer.transpose(-1, -2)) + + if (self.position_embedding_type == 'relative_key' + or self.position_embedding_type == 'relative_key_query'): + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange( + seq_length, dtype=torch.long, + device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange( + seq_length, dtype=torch.long, + device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding( + distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to( + dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == 'relative_key': + relative_position_scores = torch.einsum( + 'bhld,lrd->bhlr', query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == 'relative_key_query': + relative_position_scores_query = torch.einsum( + 'bhld,lrd->bhlr', query_layer, positional_embedding) + relative_position_scores_key = torch.einsum( + 'bhrd,lrd->bhlr', key_layer, positional_embedding) + attention_scores = ( + attention_scores + relative_position_scores_query + + relative_position_scores_key) + + attention_scores = attention_scores / math.sqrt( + self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + ( + self.all_head_size, ) + context_layer = context_layer.view(*new_context_layer_shape) + + # added `attention_scores` to return tuple + outputs = ((context_layer, attention_probs, + attention_scores) if output_attentions else + (context_layer, )) + + outputs = outputs + (past_key_value, ) + return outputs + + +class BertSelfOutput(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + + def __init__(self, config, is_cross_attention=False): + super().__init__() + + self.self = BertSelfAttention(config, is_cross_attention) + + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.self.num_attention_heads, + self.self.attention_head_size, + self.pruned_heads, + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len( + heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + # add attentions if we output them + outputs = (attention_output, ) + self_outputs[1:] + return outputs # (context_layer, attention_probs, attention_scores, past_key_value,) + + +class BertIntermediate(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) 
+ self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + + self.has_cross_attention = layer_num >= config.fusion_layer + if self.has_cross_attention: + self.crossattention = BertAttention( + config, is_cross_attention=True) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[: + 2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) # (context_layer, attention_probs, attention_scores, past_key_value,) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + + if self.has_cross_attention: + assert ( + encoder_hidden_states is not None + ), 'encoder_hidden_states must be given for cross-attention layers' + + if type(encoder_hidden_states) == list: + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states[(self.layer_num - + self.config.fusion_layer) % + len(encoder_hidden_states)], + encoder_attention_mask[(self.layer_num - + self.config.fusion_layer) % + len(encoder_hidden_states)], + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] + + else: + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) # (context_layer, attention_probs, attention_scores, past_key_value,) + attention_output = cross_attention_outputs[0] + # add cross attentions if we output attention weights + outputs = outputs + cross_attention_outputs[1:-1] + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output, ) + outputs + + outputs = outputs + (present_key_value, ) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [BertLayer(config, i) for i in range(config.num_hidden_layers)]) + logger = MMLogger.get_current_instance() + logger.info(f'build bert with cross_module: {config.cross_module}') + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + 
past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + mode='multi_modal', + normalize_attention=True, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + # all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + all_cross_attentions = () if output_attentions else None + + next_decoder_cache = () if use_cache else None + + if (mode == 'text' or mode == 'temporal' + ): # temporal is added and used for temporal att module. + start_layer = 0 + output_layer = self.config.fusion_layer + + elif mode == 'fusion': + start_layer = self.config.fusion_layer + output_layer = self.config.num_hidden_layers + + elif mode == 'multi_modal': + start_layer = 0 + output_layer = self.config.num_hidden_layers + + for i in range(start_layer, output_layer): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[ + i] if past_key_values is not None else None + + if getattr(self.config, 'gradient_checkpointing', + False) and self.training: + + if use_cache: + logger = MMLogger.get_current_instance() + logger.warn( + '`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting ' + '`use_cache=False`...') + use_cache = False + + def create_custom_forward(module): + + def custom_forward(*inputs): + return module(*inputs, past_key_value, + output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + use_reentrant=False, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) # (context_layer, attention_probs, attention_scores, past_key_value,) + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1], ) + if output_attentions: + # whether to output normalized attention, + # note for unnormalized attention, there is a mask added + offset = int(normalize_attention) + # all_self_attentions = all_self_attentions + (layer_outputs[1], ) + all_self_attentions = all_self_attentions + ( + layer_outputs[2 - offset], ) + if hasattr(layer_module, 'crossattention'): + # all_cross_attentions = all_cross_attentions + (layer_outputs[3], ) + all_cross_attentions = all_cross_attentions + ( + layer_outputs[4 - offset], ) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + if not return_dict: + return tuple(v for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] if v is not None) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first 
token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear( + config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertOnlyNSPHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +class BertPreTrainingHeads(nn.Module): + + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class BertPreTrainedModel(PreTrainedModel): + """An abstract class to handle weights initialization and a simple + interface for downloading and loading pretrained models.""" + + config_class = BertConfig + load_tf_weights = load_tf_weights_in_bert + base_model_prefix = 'bert' + _keys_to_ignore_on_load_missing = [r'position_ids'] + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_( + mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +@dataclass +class BertForPreTrainingOutput(ModelOutput): + """Output type of :class:`~transformers.BertForPreTraining`. 
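The `mode` switch in `BertEncoder.forward` above is where VindLU splits the text tower from the fusion tower: with `fusion_layer = F` and `num_hidden_layers = N`, `mode='text'` (or `'temporal'`) runs layers `[0, F)` without cross-attention, `mode='fusion'` runs layers `[F, N)` whose `BertLayer`s own a `crossattention` block over the visual features, and `mode='multi_modal'` runs all `N` layers in one pass. A small restatement of the index arithmetic (function name is illustrative):

```python
def encoder_layer_range(mode, fusion_layer, num_hidden_layers):
    """Layer indices visited by BertEncoder.forward for a given mode."""
    if mode in ('text', 'temporal'):
        return range(0, fusion_layer)
    if mode == 'fusion':
        return range(fusion_layer, num_hidden_layers)
    if mode == 'multi_modal':
        return range(0, num_hidden_layers)
    raise ValueError(f'unknown mode: {mode}')

# e.g. fusion_layer=9, num_hidden_layers=12:
#   'text'        -> layers 0..8   (unimodal text encoding)
#   'fusion'      -> layers 9..11  (cross-attention to video features)
#   'multi_modal' -> layers 0..11  (both in a single pass)
```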
+ + Args: + loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). + hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): + Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) + of shape :obj:`(batch_size, sequence_length, hidden_size)`. + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): + Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, + sequence_length, sequence_length)`. + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + prediction_logits: torch.FloatTensor = None + seq_relationship_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +BERT_START_DOCSTRING = r""" + This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic + methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, + pruning heads etc.) + This model is also a PyTorch `torch.nn.Module `__ + subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to + general usage and behavior. + Parameters: + config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model + weights. +""" + +BERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): + Indices of input sequence tokens in the vocabulary. + Indices can be obtained using :class:`~transformers.BertTokenizer`. See + :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for + details. + `What are input IDs? <../glossary.html#input-ids>`__ + attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): + Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + `What are attention masks? <../glossary.html#attention-mask>`__ + token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): + Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in ``[0, + 1]``: + - 0 corresponds to a `sentence A` token, + - 1 corresponds to a `sentence B` token. + `What are token type IDs? <../glossary.html#token-type-ids>`_ + position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, + config.max_position_embeddings - 1]``. + `What are position IDs? <../glossary.html#position-ids>`_ + head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): + Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): + Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert :obj:`input_ids` indices into associated + vectors than the model's internal embedding lookup matrix. + output_attentions (:obj:`bool`, `optional`): + Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned + tensors for more detail. + output_hidden_states (:obj:`bool`, `optional`): + Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for + more detail. + return_dict (:obj:`bool`, `optional`): + Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. +""" + + +@add_start_docstrings( + 'The bare Bert Model transformer outputting raw hidden-states without any specific head on top.', + BERT_START_DOCSTRING, +) +class BertModel(BertPreTrainedModel): + """The model can behave as an encoder (with only self-attention) as well as + a decoder, in which case a layer of cross-attention is added between the + self-attention layers, following the architecture described in `Attention + is all you need `__ by Ashish Vaswani, + Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. + + Gomez, Lukasz Kaiser and Illia Polosukhin. argument and + :obj:`add_cross_attention` set to :obj:`True`; an + :obj:`encoder_hidden_states` is then expected as an input to the forward + pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """Prunes heads of the model. + + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask(self, attention_mask: Tensor, + input_shape: Tuple[int], device: device, + is_decoder: bool) -> Tensor: + """Makes broadcastable attention and causal masks so that future and + masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. 
+ input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + seq_ids = torch.arange(seq_length, device=device) + causal_mask = ( + seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= + seq_ids[None, :, None]) + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[ + 1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, seq_length, prefix_seq_len), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = ( + causal_mask[:, None, :, :] * + attention_mask[:, None, None, :]) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + 'Wrong shape for input_ids (shape {}) or attention_mask (shape {})' + .format(input_shape, attention_mask.shape)) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to( + dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multi_modal', + normalize_attention=True, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = ( + output_attentions if output_attentions is not None else + self.config.output_attentions) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + 'You cannot specify both input_ids and inputs_embeds at the same time' + ) + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError( + 'You have to specify either input_ids or inputs_embeds or encoder_embeds' + ) + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] + if past_key_values is not None else 0) + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), + device=device) + if token_type_ids is None: + token_type_ids = torch.zeros( + input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
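+ # Note (editor comment): get_extended_attention_mask below expands the padding
+ # (and, for decoders, causal) mask into a broadcastable additive mask with
+ # 0.0 where attention is allowed and -10000.0 where it is masked out.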
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( + attention_mask, input_shape, device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[ + 0].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size( + ) + encoder_hidden_shape = (encoder_batch_size, + encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [ + self.invert_attention_mask(mask) + for mask in encoder_attention_mask + ] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones( + encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, + self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + mode=mode, + normalize_attention=normalize_attention, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler( + sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next + sentence prediction (classification)` head. 
+ """, + BERT_START_DOCSTRING, +) +class BertForPreTraining(BertPreTrainedModel): + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config) + self.cls = BertPreTrainingHeads(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) + @replace_return_docstrings( + output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + next_sentence_label=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`): + Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., + config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored + (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``: + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): + Used to hide legacy arguments that have been deprecated. 
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertForPreTraining + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> model = BertForPreTraining.from_pretrained('bert-base-uncased') + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.prediction_logits + >>> seq_relationship_logits = outputs.seq_relationship_logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output, pooled_output = outputs[:2] + prediction_scores, seq_relationship_score = self.cls( + sequence_output, pooled_output) + + total_loss = None + if labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct( + prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1)) + next_sentence_loss = loss_fct( + seq_relationship_score.view(-1, 2), + next_sentence_label.view(-1)) + total_loss = masked_lm_loss + next_sentence_loss + + if not return_dict: + output = (prediction_scores, seq_relationship_score) + outputs[2:] + return ((total_loss, ) + + output) if total_loss is not None else output + + return BertForPreTrainingOutput( + loss=total_loss, + prediction_logits=prediction_scores, + seq_relationship_logits=seq_relationship_score, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, + BERT_START_DOCSTRING, +) +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r'pooler'] + _keys_to_ignore_on_load_missing = [ + r'position_ids', r'predictions.decoder.bias' + ] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) + @replace_return_docstrings( + output_type=CausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=True, + reduction='mean', + mode='multi_modal', + normalize_attention=True, + soft_labels=None, + alpha=0, + return_logits=False, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. 
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are + ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + normalize_attention=normalize_attention, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, : + -1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction) + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1)) + lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) + + if soft_labels is not None: + loss_distill = -torch.sum( + F.log_softmax(shifted_prediction_scores, dim=1) * soft_labels, + dim=-1) + loss_distill = (loss_distill * 
(labels != -100)).sum(1) + lm_loss = (1 - alpha) * lm_loss + alpha * loss_distill + + if not return_dict: + output = (prediction_scores, ) + outputs[2:] + return ((lm_loss, ) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, + input_ids, + past=None, + attention_mask=None, + **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + 'input_ids': + input_ids, + 'attention_mask': + attention_mask, + 'past_key_values': + past, + 'encoder_hidden_states': + model_kwargs.get('encoder_hidden_states', None), + 'encoder_attention_mask': + model_kwargs.get('encoder_attention_mask', None), + 'is_decoder': + True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past), ) + return reordered_past + + +@dataclass +class MaskedLMOutputWithDistill(MaskedLMOutput): + loss_aux: Optional[torch.FloatTensor] = None + loss_distill: Optional[torch.FloatTensor] = None + + +@add_start_docstrings( + """Bert Model with a `language modeling` head on top. """, + BERT_START_DOCSTRING) +class BertForMaskedLM(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r'pooler'] + _keys_to_ignore_on_load_missing = [ + r'position_ids', r'predictions.decoder.bias' + ] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def tie_aux_decoder_weights(self, module, aux_modules): + """Tie decoder weights of all `aux_modules` to `module`, (not bias)""" + for m in aux_modules: + m.predictions.decoder.weight = module.predictions.decoder.weight + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multi_modal', + normalize_attention=True, + soft_labels=None, + alpha=0, + return_logits=False, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ..., + config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored + (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_embeds=encoder_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + normalize_attention=normalize_attention, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores + + masked_lm_loss = None + masked_lm_loss_aux = 0.0 + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct( + prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1)) + + if soft_labels is not None: + loss_distill = -torch.sum( + F.log_softmax(prediction_scores, dim=1) * soft_labels, dim=-1) + loss_distill = loss_distill[labels != -100].mean() + masked_lm_loss = (1 - + alpha) * masked_lm_loss + alpha * loss_distill + + if not return_dict: + output = (prediction_scores, ) + outputs[2:] + return ((masked_lm_loss, ) + + output) if masked_lm_loss is not None else output + + # changed from MaskedLMOutput to MaskedLMOutputWithDistill + return MaskedLMOutputWithDistill( + loss=masked_lm_loss, + loss_aux=masked_lm_loss_aux, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation(self, + input_ids, + attention_mask=None, + **model_kwargs): + input_shape = input_ids.shape + effective_batch_size = input_shape[0] + + # add a dummy token + assert (self.config.pad_token_id + is not None), 'The PAD token should be defined for generation' + attention_mask = torch.cat([ + attention_mask, + attention_mask.new_zeros((attention_mask.shape[0], 1)) + ], + dim=-1) + dummy_token = torch.full( + (effective_batch_size, 1), + self.config.pad_token_id, + dtype=torch.long, + device=input_ids.device, + ) + input_ids = torch.cat([input_ids, dummy_token], dim=1) + + return {'input_ids': input_ids, 'attention_mask': attention_mask} diff --git a/mmaction/models/multimodal/vindlu/temporal_model.py b/mmaction/models/multimodal/vindlu/temporal_model.py new file mode 100644 index 0000000000..7271aedc8a --- /dev/null +++ b/mmaction/models/multimodal/vindlu/temporal_model.py @@ -0,0 +1,213 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
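+ # Temporal modeling blocks used by VindLU: ST-Adapter, temporal self-attention,
+ # windowed temporal self-attention and the X-CLIP message-token block.
+ # All operate on features of shape [bs, nframes, 1 + h*w, c] and return the
+ # input unchanged when only a single frame is given.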
+import math + +import einops +import torch +from einops import rearrange +from timm.models.layers import DropPath +from torch import nn +from torch.nn import LayerNorm, Linear, MultiheadAttention + + +class STAdapter(nn.Module): + """ST Adapter.""" + + def __init__( + self, + kernel_size=(3, 3, 3), + input_dim=768, + hidden_dim=384, + img_size=224, + patch_size=16, + drop_prob=0.1, + ): + super(STAdapter, self).__init__() + self.kernel_size = kernel_size + self.input_dim = input_dim + self.hidden_dim = hidden_dim + + self.h = self.w = img_size // patch_size + + self.linear1 = nn.Linear(input_dim, hidden_dim) + self.linear2 = nn.Linear(hidden_dim, input_dim) + self.act = nn.ReLU() + self.conv = nn.Conv3d( + hidden_dim, + hidden_dim, + kernel_size=kernel_size, + padding='same', + groups=hidden_dim) + self.droppath = DropPath(drop_prob=drop_prob) + + self.scale = nn.parameter.Parameter(torch.zeros([])) + + def forward(self, x: torch.Tensor): + """forward. + + Args: + x (torch.Tensor): input features. + Shape: [bs, nframes, l, c]. l = 1 + h*w + + Returns: features after adapter. The same shape as input. + """ + if x.shape[1] == 1: # for single frame, return itself. + return x + + shortcut = x + x = self.linear1(x) + cls = x[:, :, :1, :] + tokens = x[:, :, 1:, :] + tokens = einops.rearrange( + tokens, 'b t (h w) c -> b c t h w', h=self.h).contiguous() + tokens = self.conv(tokens) + tokens = einops.rearrange(tokens, 'b c t h w -> b t (h w) c') + x = torch.cat([cls, tokens], dim=2) # [b, t, 1+h*w, c] + x = self.act(x) + x = self.linear2(x) + + return shortcut + self.scale * self.droppath(x) + + +class TemporalAttention(nn.Module): + """perform temporal self-attention.""" + + def __init__(self, input_dim=768, droppath_rate=0.1): + """ + + Kwargs: + input_dim (int): The input feature dimension. + + + """ + super().__init__() + + self._input_dim = input_dim + self.temporal_attn = MultiheadAttention( + input_dim, num_heads=input_dim // 64) + self.norm = LayerNorm(input_dim, eps=1e-12) + self.linear = Linear(input_dim, input_dim) + self.droppath = DropPath(droppath_rate) + self.scale = nn.parameter.Parameter(torch.zeros([])) + + def forward(self, x: torch.Tensor): + """forward. + + Args: + x (torch.Tensor): input features. + Shape: [bs, nframes, l, c]. l = 1 + h*w + + Returns: features after adapter. The same shape as input. + """ + if x.shape[1] == 1: # for single frame, return itself. + return x + + shortcut = x + x = einops.rearrange(x, 'b t l c -> t (b l) c') + x = self.norm(x) + x = self.temporal_attn(x, x, x)[0] + x = einops.rearrange(x, 't (b l) c -> b t l c', b=shortcut.shape[0]) + return shortcut + self.scale * self.droppath(x) + + +class WindowTemporalAttention(nn.Module): + """perform windowed temporal self-attention.""" + + def __init__(self, input_dim=768, droppath_rate=0.1, window_size=(2, 2)): + """ + + Kwargs: + input_dim (int): The input feature dimension. + + + """ + super().__init__() + + self._input_dim = input_dim + self.temporal_attn = MultiheadAttention( + input_dim, num_heads=input_dim // 64) + self.norm = LayerNorm(input_dim, eps=1e-12) + self.droppath = DropPath(droppath_rate) + self.scale = nn.parameter.Parameter(torch.zeros([])) + self.wh, self.ww = window_size + + def forward(self, x: torch.Tensor): + """forward. + + Args: + x (torch.Tensor): input features. + Shape: [bs, nframes, l, c]. l = 1 + h*w + + Returns: features after adapter. The same shape as input. + """ + if x.shape[1] == 1: # for single frame, return itself. 
+ return x + shortcut = x + + h = w = int(math.sqrt(x.shape[2] - 1)) + cls_token = x[:, :, :1, :] + x = einops.rearrange( + x[:, :, 1:, :], + 'b t (nh wh nw ww) c -> (t wh ww) (b nh nw) c', + nh=h // self.wh, + wh=self.wh, + nw=w // self.ww, + ww=self.ww, + ) + x = self.norm(x) + x = self.temporal_attn(x, x, x)[0] + x = einops.rearrange( + x, + '(t wh ww) (b nh nw) c -> b t (nh wh nw ww) c', + wh=self.wh, + ww=self.ww, + nh=h // self.wh, + nw=w // self.ww, + ) + # add back cls token. + x = torch.concat([cls_token, x], dim=2) + return shortcut + self.scale * self.droppath(x) + + +class X_CLIP(nn.Module): + """perform windowed temporal self-attention.""" + + def __init__(self, input_dim=768, droppath_rate=0.1, num_prompts=1): + """ + + Kwargs: + input_dim (int): The input feature dimension. + + + """ + super().__init__() + + d_model = input_dim + + self.message_fc = nn.Linear(d_model, d_model) + self.message_ln = LayerNorm(d_model, eps=1e-12) + self.message_attn = nn.MultiheadAttention(d_model, d_model // 64) + self.num_prompts = num_prompts + + self.droppath = DropPath(droppath_rate) + + def forward(self, x: torch.Tensor): + """forward. + + Args: + x (torch.Tensor): input features. + Shape: [bs, nframes, l, c]. l = 1 + h*w + + Returns: features after adapter. The same shape as input. + """ + if x.shape[1] == 1: # for single frame, return itself. + return x + msg_token = self.message_ln(self.message_fc(x[:, :, + 0, :])) # [b, t, c] + msg_token = rearrange(msg_token, 'b t c -> t b c') + msg_token = msg_token + self.droppath( + self.message_attn(msg_token, msg_token, msg_token)[0]) + msg_token = rearrange(msg_token, 't b c -> b t c') + # replace the last prompt token with msg_token. + x = torch.cat([x[:, :, :-1, :], + msg_token.unsqueeze(2)], dim=2) # [b, t, l+1, c] + return x diff --git a/mmaction/models/multimodal/vindlu/tokenizer.py b/mmaction/models/multimodal/vindlu/tokenizer.py new file mode 100644 index 0000000000..92be293dff --- /dev/null +++ b/mmaction/models/multimodal/vindlu/tokenizer.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +from transformers import BertTokenizer + +from mmaction.registry import TOKENIZER + + +class VindLUTokenizer(BertTokenizer): + """VindLUTokenizer inherit BertTokenizer. + + The main difference from BertTokenizer is removing the last separate token + for a single sequence. + """ + + def build_inputs_with_special_tokens( + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None) -> List[int]: + """Build model inputs from a sequence or a pair of sequence for + sequence classification tasks by concatenating and adding special + tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with + the appropriate special tokens. 
+ """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + +TOKENIZER.register_module( + 'VindLUTokenizer', module=VindLUTokenizer.from_pretrained) diff --git a/mmaction/models/multimodal/vindlu/utils.py b/mmaction/models/multimodal/vindlu/utils.py new file mode 100644 index 0000000000..8737dde9ea --- /dev/null +++ b/mmaction/models/multimodal/vindlu/utils.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmengine.dist as dist +import numpy as np +import torch +import torch.nn.functional as F +from mmengine.logging import MMLogger +from scipy import interpolate + + +def all_gather_concat(data: torch.Tensor) -> torch.Tensor: + """Gather tensors with different first-dimension size and concat to one + tenosr. + + Note: + Only the first dimension should be different. + + Args: + data (Tensor): Tensor to be gathered. + + Returns: + torch.Tensor: The concatenated tenosr. + """ + if dist.get_world_size() == 1: + return data + + data_size = torch.tensor(data.size(0), device=data.device) + sizes_list = dist.all_gather(data_size) + + total_length = sum(sizes_list) + max_length = max(sizes_list) + size_diff = max_length.item() - data_size.item() + if size_diff: + padding = torch.zeros( + size_diff, *data.size()[1:], device=data.device, dtype=data.dtype) + data = torch.cat((data, padding)) + + gather_list = dist.all_gather(data) + + # gather all data according to the default DDP sampler. For instance, + # 8 samples on 2 GPUs, GPU0: [0,2,4,6], GPU1: [1,3,5,7], will be gathered + # as [0,1,2,3,4,5,6,7] + all_data = [] + for gather_batch in zip(*gather_list): + all_data.extend(gather_batch) + + return torch.stack(all_data)[:total_length] + + +def interpolate_pos_embed_beit(state_dict, new_model): + """interpolate the positional embeddings. The spatial pe is relative and + temporal pe is absolute. additional temporal pe is padded with 0. + + Args: + state_dict (dict): The state_dict. + new_model (nn.Module): The created model. + + Returns: dict. The state_dict with updated positional embeddings. + """ + state_dict = interpolate_pos_relative_bias_beit( + state_dict_old=state_dict, + state_dict_new=new_model.state_dict(), + patch_shape_new=new_model.vision_encoder.embeddings.patch_embeddings. + patch_shape, + ) + # absolute temporal pos bias + temporal_pe_key = 'vision_encoder.embeddings.temporal_position_embeddings' + if temporal_pe_key in state_dict: + logger = MMLogger.get_current_instance() + logger.info( + f'interpolate temporal positional embeddings: {temporal_pe_key}') + state_dict[temporal_pe_key] = load_temp_embed_with_mismatch( + temp_embed_old=state_dict[temporal_pe_key], + temp_embed_new=new_model.state_dict()[temporal_pe_key], + ) + return state_dict + + +def load_temp_embed_with_mismatch(temp_embed_old, + temp_embed_new, + add_zero=True): + """Add/Remove extra temporal_embeddings as needed. + https://arxiv.org/abs/2104.00650 shows adding zero paddings works. + + temp_embed_old: (1, num_frames_old, 1, d) + temp_embed_new: (1, num_frames_new, 1, d) + add_zero: bool, if True, add zero, else, interpolate trained embeddings. 
+ """ + # TODO zero pad + num_frms_new = temp_embed_new.shape[1] + num_frms_old = temp_embed_old.shape[1] + logger = MMLogger.get_current_instance() + logger.info( + f'Load temporal_embeddings, lengths: {num_frms_old}-->{num_frms_new}') + if num_frms_new > num_frms_old: + if add_zero: + temp_embed_new[:, :num_frms_old] \ + = temp_embed_old # untrained embeddings are zeros. + else: + temp_embed_new = interpolate_temporal_pos_embed( + temp_embed_old, num_frms_new) + elif num_frms_new < num_frms_old: + temp_embed_new = temp_embed_old[:, :num_frms_new] + else: # = + temp_embed_new = temp_embed_old + return temp_embed_new + + +def interpolate_temporal_pos_embed(temp_embed_old, num_frames_new): + """ + temp_embed_old: (1, num_frames_old, 1, d) + Returns: + temp_embed_new: (1, num_frames_new, 1, d) + """ + temp_embed_old = temp_embed_old.squeeze(2).permute( + 0, 2, 1) # (1, d, num_frames_old) + temp_embed_new = F.interpolate( + temp_embed_old, num_frames_new, + mode='linear') # (1, d, num_frames_new) + temp_embed_new = temp_embed_new.permute(0, 2, 1).unsqueeze( + 2) # (1, num_frames_new, 1, d) + return temp_embed_new + + +def interpolate_pos_relative_bias_beit(state_dict_old, state_dict_new, + patch_shape_new): + """ + Args: + state_dict_old: loaded state dict + state_dict_new: state dict for model with new image size + patch_shape_new: new model patch_shape + ref: https://github.com/microsoft/unilm/blob/master/beit/run_class_finetuning.py # noqa: E501 + """ + all_keys = list(state_dict_old.keys()) + for key in all_keys: + if 'relative_position_index' in key: + state_dict_old.pop(key) + + if 'relative_position_bias_table' in key: + rel_pos_bias = state_dict_old[key] + src_num_pos, num_attn_heads = rel_pos_bias.size() + dst_num_pos, _ = state_dict_new[key].size() + dst_patch_shape = patch_shape_new + if dst_patch_shape[0] != dst_patch_shape[1]: + raise NotImplementedError() + num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * ( + dst_patch_shape[1] * 2 - 1) + src_size = int((src_num_pos - num_extra_tokens)**0.5) + dst_size = int((dst_num_pos - num_extra_tokens)**0.5) + if src_size != dst_size: + extra_tokens = rel_pos_bias[-num_extra_tokens:, :] + rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] + + def geometric_progression(a, r, n): + return a * (1.0 - r**n) / (1.0 - r) + + left, right = 1.01, 1.5 + while right - left > 1e-6: + q = (left + right) / 2.0 + gp = geometric_progression(1, q, src_size // 2) + if gp > dst_size // 2: + right = q + else: + left = q + + dis = [] + cur = 1 + for i in range(src_size // 2): + dis.append(cur) + cur += q**(i + 1) + + r_ids = [-_ for _ in reversed(dis)] + + x = r_ids + [0] + dis + y = r_ids + [0] + dis + + t = dst_size // 2.0 + dx = np.arange(-t, t + 0.1, 1.0) + dy = np.arange(-t, t + 0.1, 1.0) + + all_rel_pos_bias = [] + + for i in range(num_attn_heads): + z = rel_pos_bias[:, i].view(src_size, + src_size).float().numpy() + f = interpolate.interp2d(x, y, z, kind='cubic') + all_rel_pos_bias.append( + torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to( + rel_pos_bias.device)) + + rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) + + new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), + dim=0) + state_dict_old[key] = new_rel_pos_bias + return state_dict_old diff --git a/mmaction/models/multimodal/vindlu/vindlu.py b/mmaction/models/multimodal/vindlu/vindlu.py new file mode 100644 index 0000000000..1f6f9dcff2 --- /dev/null +++ b/mmaction/models/multimodal/vindlu/vindlu.py @@ -0,0 +1,227 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from abc import abstractmethod +from typing import Optional + +import torch +from mmengine.logging import MMLogger +from mmengine.model import BaseModel +from mmengine.runner.checkpoint import _load_checkpoint +from torch import nn + +from mmaction.registry import MODELS, TOKENIZER +from mmaction.utils import ForwardResults, SampleList +from .utils import (interpolate_pos_embed_beit, + interpolate_pos_relative_bias_beit) + + +class VindLUBase(BaseModel): + """VindLU base Model. + + Args: + tokenizer: (dict): The config for tokenizer. + vision_encoder (dict): Backbone for extracting image features. + text_encoder (dict): Backbone for extracting text features. + temperature (float): Temperature parameter that controls the + concentration level of the distribution. Defaults to 0.07. + gradient_checkpointing (bool): Whether to do gradient_checkpointing. + Using checkpoint will save some memory while slowing down the + training speed. Defaults to False. + data_preprocessor (Optional[dict]): The config for preprocessing input + data. + init_cfg (Optional[dict]): the config to control the initialization. + Defaults to None. + """ + + def __init__( + self, + tokenizer: dict, + vision_encoder: dict, + text_encoder: dict, + proj_dim: int = 256, + temperature: float = 0.07, + gradient_checkpointing: bool = False, + pretrined_vl: bool = True, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None, + ): + if data_preprocessor is None: + data_preprocessor = dict(type='ActionDataPreprocessor') + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + self.tokenizer = TOKENIZER.build(tokenizer) + self.vision_cfg = vision_encoder + self.text_encoder_cfg = text_encoder + self.gradient_checkpointing = gradient_checkpointing + self.text_encoder_cfg.gradient_checkpointing = gradient_checkpointing + + self.vision_width = vision_encoder.pop('encoder_width') + self.text_width = text_encoder.encoder_width + self.pretrined_vl = pretrined_vl + + if self.vision_cfg.pop('add_ln'): + self.vision_layernorm = nn.LayerNorm(self.vision_width, eps=1e-12) + else: + self.vision_layernorm = nn.Identity() + + self.vision_encoder = MODELS.build(self.vision_cfg) + + if gradient_checkpointing: + self.vision_encoder.gradient_checkpointing_enable() + + self.text_encoder = MODELS.build(self.text_encoder_cfg) + + self.vision_proj = nn.Linear(self.vision_width, proj_dim) + self.text_proj = nn.Linear(self.text_width, proj_dim) + + self.temp = nn.parameter.Parameter(torch.ones([]) * temperature) + self.itm_head = nn.Linear(self.text_width, 2) + + def extract_feat(self, inputs: torch.Tensor, **kwargs) -> ForwardResults: + """Extract features from raw inputs.""" + + @abstractmethod + def loss(self, inputs: torch.Tensor, data_samples: SampleList, + **kwargs) -> dict: + """Calculate losses from a batch of inputs and data samples.""" + + def forward(self, inputs, data_samples, mode: str = 'loss'): + """The unified entry for a forward process in both training and test. + + The method should accept three modes: + + - ``tensor``: Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - ``predict``: Forward and return the predictions, which are fully + processed to a list of :obj:`ActionDataSample`. + - ``loss``: Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. 
+ + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[``ActionDataSample], optional): The + annotation data of every samples. Defaults to None. + mode (str): Return what kind of value. Defaults to ``tensor``. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor or a tuple of tensor. + - If ``mode="predict"``, return a list of ``ActionDataSample``. + - If ``mode="loss"``, return a dict of tensor. + """ + + if mode == 'tensor': + return self.extract_feat(inputs, data_samples) + elif mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def encode_vision(self, image): + """encode image / videos as features. + + Args: + image (torch.Tensor): The input images. + + Returns: tuple. + - vision_embeds (torch.Tensor): The features of all patches. + Shape: [B,T,L,C]. + - pooled_vision_embeds (torch.Tensor): The pooled features. + Shape: [B,T,C]. + """ + output_dict = self.vision_encoder(image) + vision_embeds = self.vision_layernorm(output_dict.last_hidden_state) + pooled_vision_embeds = output_dict.pooler_output + + return vision_embeds, pooled_vision_embeds + + def encode_text(self, text): + """encode text. + Args: + text (dict): The output of huggingface's `PreTrainedTokenizer`. + contains keys: + - input_ids (torch.Tensor): Token ids to be fed to a model. + Shape: [B,L]. + - attention_mask (torch.Tensor): The mask indicate padded tokens. + Shape: [B,L]. 0 is padded token. + - other keys refer to "https://huggingface.co/docs/transformers/v4.21.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__". # noqa: E501 + Returns: tuple. + - text_embeds (torch.Tensor): The features of all tokens. Shape: [B,L,C]. + - pooled_text_embeds (torch.Tensor): The pooled features. Shape: [B,C]. + + """ + text_output = self.text_encoder( + text.input_ids, + attention_mask=text.attention_mask, + return_dict=True, + mode='text', + ) + text_embeds = text_output.last_hidden_state + pooled_text_embeds = text_embeds[:, 0] + return text_embeds, pooled_text_embeds + + @torch.no_grad() + def clip_contrastive_temperature(self, min_val=0.001, max_val=0.5): + """Seems only used during pre-training.""" + self.temp.clamp_(min_val, max_val) + + @property + def device(self): + return next(self.parameters()).device + + def preprocess_state_dict(self, state_dict): + """Preprocess pretrained checkpoint for text_encoder.""" + for key in list(state_dict.keys()): + if 'bert' in key: + encoder_key = key.replace('bert.', '') + state_dict[encoder_key] = state_dict[key] + del state_dict[key] + return state_dict + + def load_from_pretrainded_beit(self): + from transformers.models.beit.modeling_beit import BeitModel + beit2d = BeitModel.from_pretrained( + self.vision_cfg.pretrained_model_name_or_path) + ori_state_dict = beit2d.state_dict() + del beit2d + # interpolate relative pos bias + state_dict = interpolate_pos_relative_bias_beit( + state_dict_old=ori_state_dict, + state_dict_new=self.vision_encoder.state_dict(), + patch_shape_new=self.vision_encoder.embeddings.patch_embeddings. 
+ patch_shape, + ) + + for k in list(state_dict.keys()): + if 'prompt_bias_table' in k: + del state_dict[k] + + msg = self.vision_encoder.load_state_dict(state_dict, strict=False) + logger = MMLogger.get_current_instance() + logger.info(msg) + + def init_weights(self): + if self.vision_cfg.get('pretrained2d', False): + self.load_from_pretrainded_beit() + + if self.pretrined_vl: + assert self.init_cfg.get('type') == 'Pretrained', ( + 'Please specify ' + 'init_cfg to use pretrained video-language checkpoint') + self.pretrained = self.init_cfg.get('checkpoint') + checkpoint = _load_checkpoint(self.pretrained, map_location='cpu') + state_dict = checkpoint['model'] + state_dict = interpolate_pos_embed_beit(state_dict, self) + state_dict = self.preprocess_state_dict(state_dict) + msg = self.load_state_dict(state_dict, strict=False) + logger = MMLogger.get_current_instance() + logger.info(msg) + else: + super().init_weights() diff --git a/mmaction/models/multimodal/vindlu/vindlu_ret.py b/mmaction/models/multimodal/vindlu/vindlu_ret.py new file mode 100644 index 0000000000..cc80982c39 --- /dev/null +++ b/mmaction/models/multimodal/vindlu/vindlu_ret.py @@ -0,0 +1,464 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional + +import mmengine.dist as dist +import torch +import torch.nn.functional as F +from einops import rearrange +from torch.distributed.nn import all_gather as all_gather_with_grad + +from mmaction.registry import MODELS +from mmaction.structures import ActionDataSample +from mmaction.utils import track_on_main_process +from .utils import all_gather_concat +from .vindlu import VindLUBase + + +@MODELS.register_module() +class VindLURetrieval(VindLUBase): + """VindLU retriever. + + max_txt_len (int): Max text length of input text, used for retrieval + from multiple choices. Defaults to 32. + topk (int): Select topk similarity as candidates for compute matching + scores. Defaults to 256. + negative_all_rank (bool): Whether to sample negative data from all + ranks for image text matching in training. Defaults to False. + fast_match (bool): If False, select topk similarity as candidates and + compute the matching score. If True, return the similarity as the + matching score directly. Defaults to False. + **kwargs: Other keyword arguments to initialize the VindLU base model. + """ + + def __init__(self, + max_txt_len: int = 32, + topk: int = 128, + negative_all_rank: bool = False, + fast_match: bool = False, + **kwargs): + super().__init__(**kwargs) + + self.max_txt_len = max_txt_len + self.topk = topk + self.negative_all_rank = negative_all_rank + self.fast_match = fast_match + + def loss( + self, + inputs: torch.Tensor, + data_samples: Optional[List[ActionDataSample]] = None, + ) -> Dict[str, torch.tensor]: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (dict): A batch of inputs. The input tensor with of + at least one modality. For image, the value is a tensor + of shape (N, C, ...) in general. + For text, the value is a dict of tokenized text inputs. + data_samples (Optional[List[DataSample]]): + The annotation data of every samples. Defaults to None. 
+ + Returns: + Dict[str, torch.tensor]: a dictionary of loss components of + """ + output = self.extract_feat(inputs, data_samples) + + text_embeds = output['text_embeds'] + text_attn_mask = output['text_attn_mask'] + image_embeds = output['image_embeds'] + image_feat = output['image_feat'] + text_feat = output['text_feat'] + + image_atts = torch.ones( + image_embeds.size()[:-1], dtype=torch.long).to(self.device) + + # ITC Loss + # B*world_size, D + image_feat_all = torch.cat(dist.all_gather(image_feat)) + # B*world_size, D + text_feat_all = torch.cat(dist.all_gather(text_feat)) + + # image to text similarity + # B, B*world_size + sim_i2t = torch.einsum('mld,nd->mln', image_feat, + text_feat_all).mean(1) / self.temp + # text-image similarity + # B, B*world_size + sim_t2i = torch.einsum('md,nld->mln', text_feat, + image_feat_all).mean(1) / self.temp + + rank = dist.get_rank() + bs = inputs.size(0) + itc_targets = torch.linspace( + rank * bs, rank * bs + bs - 1, bs, dtype=int).to(self.device) + + itc_loss = (F.cross_entropy(sim_i2t, itc_targets) + + F.cross_entropy(sim_t2i, itc_targets)) / 2 + + # prepare for itm + output_pos = self.text_encoder( + encoder_embeds=text_embeds, + attention_mask=text_attn_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + mode='fusion', + ) + + idx = torch.tensor([i.gt_video_id for i in data_samples]).view(-1, 1) + bs = idx.size(0) + if self.negative_all_rank: + idxs = torch.cat(dist.all_gather(idx)) + image_feat_world = torch.cat(dist.all_gather(image_feat)) + text_feat_world = torch.cat(dist.all_gather(text_feat)) + att_mask_world = torch.cat(dist.all_gather(text_attn_mask)) + text_embeds_world = torch.cat(all_gather_with_grad(text_embeds)) + image_embeds_world = torch.cat(all_gather_with_grad(image_embeds)) + else: + idxs = idx + image_feat_world = image_feat.detach() + text_feat_world = text_feat.detach() + image_embeds_world = image_embeds + text_embeds_world = text_embeds + att_mask_world = text_attn_mask + + with torch.no_grad(): + # compute sample similarity + sim_i2t = torch.einsum('mld,nd->mln', image_feat, + text_feat_world).mean(1) / self.temp + sim_t2i = torch.einsum('md,nld->mln', text_feat, + image_feat_world).mean(1) / self.temp + + mask = torch.eq(idx, idxs.t()).to(self.device) + weights_i2t = F.softmax(sim_i2t + 1e-4, dim=1) + weights_i2t.masked_fill_(mask, 0) + + weights_t2i = F.softmax(sim_t2i + 1e-4, dim=1) + weights_t2i.masked_fill_(mask, 0) + + # select a negative image for each text + neg_idx = torch.multinomial(weights_t2i, 1).squeeze() + image_embeds_neg = image_embeds_world[neg_idx] + + # select a negative text for each image + neg_idx = torch.multinomial(weights_i2t, 1).squeeze() + text_embeds_neg = text_embeds_world[neg_idx] + text_atts_neg = att_mask_world[neg_idx] + + text_embeds_all = torch.cat([text_embeds, text_embeds_neg], dim=0) + text_atts_all = torch.cat([text_attn_mask, text_atts_neg], dim=0) + + image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0) + image_atts_all = torch.cat([image_atts, image_atts], dim=0) + + output_neg = self.text_encoder( + encoder_embeds=text_embeds_all, + attention_mask=text_atts_all, + encoder_hidden_states=image_embeds_all, + encoder_attention_mask=image_atts_all, + return_dict=True, + mode='fusion', + ) + + vl_embeddings = torch.cat( + [ + output_pos.last_hidden_state[:, 0, :], + output_neg.last_hidden_state[:, 0, :], + ], + dim=0, + ) + + itm_targets = torch.ones((3 * bs, ), + dtype=torch.long, + device=inputs.device) + 
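+ # the first bs entries are the positive (matched) pairs; the remaining
+ # 2 * bs entries are the sampled hard negatives, labelled 0 below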
itm_targets[bs:] = 0 + itm_logit = self.itm_head(vl_embeddings) + itm_loss = F.cross_entropy(itm_logit, itm_targets) + + return dict(itc_loss=itc_loss, itm_loss=itm_loss) + + def preprocess_text(self, data_samples): + sample_item = data_samples[0] + + if sample_item is not None and 'text' in sample_item: + if isinstance(sample_item.get('text'), (list, tuple)): + texts = [] + for sample in data_samples: + texts.extend(sample.get('text')) + elif isinstance(sample_item.get('text'), str): + texts = [sample.get('text') for sample in data_samples] + else: + raise TypeError('text must be a string or a list of strings') + else: + return None + + # perform tokenize first if satisfied conditions + texts = self.tokenizer( + texts, + padding='max_length', + truncation=True, + max_length=self.max_txt_len, + return_tensors='pt', + ).to(self.device) + + return texts + + def extract_feat( + self, + images: torch.Tensor = None, + data_samples: List[ActionDataSample] = None, + return_texts=True, + ) -> Dict[str, torch.Tensor]: + """Extract features from the input dict. + + Args: + images (tensor, optional): The images to extract features. + Defaults to None. + data_samples (list, optional): The data samples containing texts + to extract features. Defaults to None. + return_texts (bool): Whether to return the tokenized text and the + corresponding attention masks. Defaults to True. + + Returns: + Tuple[torch.Tensor]: The output features. + If multimodal_backbone is not exist, tuple of torch.Tensor + will be returned. + """ + if data_samples is not None: + texts = self.preprocess_text(data_samples) + else: + texts = None + + assert images is not None or texts is not None, \ + 'At least single modality should be passed as inputs.' + + results = {} + if texts is not None and return_texts: + results.update({ + 'text_ids': texts.input_ids, + 'text_attn_mask': texts.attention_mask, + }) + + # extract image features + if images is not None: + image_embeds, pooled_image_embeds = self.encode_vision(images) + # concat temporal embeds + image_embeds = rearrange(image_embeds, + 'b t l c -> b (t l) c').contiguous() + results['image_embeds'] = image_embeds + results['image_feat'] = F.normalize( + self.vision_proj(pooled_image_embeds), dim=-1) + + # extract text features + if texts is not None: + texts_output = self.text_encoder( + texts.input_ids, + attention_mask=texts.attention_mask, + return_dict=True, + mode='text') + + text_embeds = texts_output.last_hidden_state + pooled_text_feat = text_embeds[:, 0] + results['text_embeds'] = text_embeds + results['text_feat'] = F.normalize( + self.text_proj(pooled_text_feat), dim=-1) + + return results + + def predict(self, images, data_samples, cal_i2t=True, cal_t2i=True): + feats = self.extract_feat(images, data_samples) + + return self.predict_all( + feats, data_samples, cal_i2t=cal_i2t, cal_t2i=cal_t2i) + + def predict_all(self, + feats, + data_samples, + num_images=None, + num_texts=None, + cal_i2t=True, + cal_t2i=True): + text_attn_mask = feats['text_attn_mask'] + image_embeds = feats.get('image_embeds', None) + image_feat = feats['image_feat'] + text_embeds = feats['text_embeds'] + text_feat = feats['text_feat'] + + num_images = num_images or image_feat.size(0) + num_texts = num_texts or text_feat.size(0) + + image_embeds_all = all_gather_concat(image_embeds)[:num_images] + image_feat_all = all_gather_concat(image_feat)[:num_images] + text_feat_all = all_gather_concat(text_feat)[:num_texts] + text_embeds_all = all_gather_concat(text_embeds)[:num_texts] + text_attn_mask_all 
= all_gather_concat(text_attn_mask)[:num_texts] + + results = [] + if cal_i2t: + result_i2t = self.compute_score_matrix_i2t( + image_feat, + image_embeds, + text_feat_all, + text_embeds_all, + text_attn_mask_all, + ) + results.append( + self._get_predictions(result_i2t, data_samples, mode='i2t')) + if cal_t2i: + result_t2i = self.compute_score_matrix_t2i( + image_feat_all, + image_embeds_all, + text_feat, + text_embeds, + text_attn_mask, + ) + results.append( + self._get_predictions(result_t2i, data_samples, mode='t2i')) + return tuple(results) + + def compute_score_matrix_i2t(self, img_feats, img_embeds, text_feats, + text_embeds, text_atts): + """Compare the score matrix for image-to-text retrieval. Every image + should compare to all the text features. + + Args: + img_feats (torch.Tensor): The input img feats tensor with shape + (M, C). M stands for numbers of samples on a single GPU. + img_embeds (torch.Tensor): The input img embeds tensor with shape + (M, C). M stands for numbers of samples on a single GPU. + text_feats (torch.Tensor): The input text feats tensor with shape + (N, C). N stands for numbers of all samples on all GPUs. + text_embeds (torch.Tensor): The input tensor with shape (N, C). + text_atts (torch.Tensor): The input tensor with shape (N, C). + + Returns: + torch.Tensor: Score matrix of image-to-text retrieval. + """ + # compute i2t sim matrix + sim_matrix_i2t = torch.einsum('mld,nd->mln', img_feats, + text_feats).mean(1) + if self.fast_match: + return sim_matrix_i2t + + score_matrix_i2t = torch.full((img_feats.size(0), text_feats.size(0)), + -100.0).to(self.device) + for i in track_on_main_process( + range(img_feats.size(0)), 'Compute I2T scores...'): + sims = sim_matrix_i2t[i] + topk_sim, topk_idx = sims.topk(k=self.topk, dim=0) + topk_bz = 32 + encoder_output = img_embeds[i].repeat(topk_bz, 1, 1) + encoder_att = torch.ones( + encoder_output.size()[:-1], dtype=torch.long).to(self.device) + for j in range(0, self.topk // topk_bz): + batch_topk = topk_idx[j * topk_bz:(j + 1) * topk_bz] + output = self.text_encoder( + encoder_embeds=text_embeds[batch_topk], + attention_mask=text_atts[batch_topk], + encoder_hidden_states=encoder_output, + encoder_attention_mask=encoder_att, + return_dict=True, + mode='fusion') + score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1] + score_matrix_i2t[i, batch_topk] = score + return score_matrix_i2t + + def compute_score_matrix_t2i(self, img_feats, img_embeds, text_feats, + text_embeds, text_atts): + """Compare the score matrix for text-to-image retrieval. Every text + should compare to all the image features. + + Args: + img_feats (torch.Tensor): The input img feats tensor with shape + (M, C). M stands for numbers of samples on a single GPU. + img_embeds (torch.Tensor): The input img embeds tensor with shape + (M, C). M stands for numbers of samples on a single GPU. + text_feats (torch.Tensor): The input text feats tensor with shape + (N, C). N stands for numbers of all samples on all GPUs. + text_embeds (torch.Tensor): The input tensor with shape (M, C). + text_atts (torch.Tensor): The input tensor with shape (M, C). + + Returns: + torch.Tensor: Score matrix of text-to-image retrieval. 
+ """ + # compute t2i sim matrix + sim_matrix_t2i = torch.einsum('md,nld->mln', text_feats, + img_feats).mean(1) + + if self.fast_match: + return sim_matrix_t2i + + score_matrix_t2i = torch.full((text_feats.size(0), img_feats.size(0)), + -100.0).to(self.device) + for i in track_on_main_process( + range(text_feats.size(0)), 'Compute T2I scores...'): + sims = sim_matrix_t2i[i] + topk_sim, topk_idx = sims.topk(k=self.topk, dim=0) + topk_bz = 32 + for j in range(0, self.topk // topk_bz): + batch_topk = topk_idx[j * topk_bz:(j + 1) * topk_bz] + encoder_output = img_embeds[batch_topk] + encoder_att = torch.ones( + encoder_output.size()[:-1], + dtype=torch.long).to(self.device) + output = self.text_encoder( + encoder_embeds=text_embeds[i].repeat(topk_bz, 1, 1), + attention_mask=text_atts[i].repeat(topk_bz, 1), + encoder_hidden_states=encoder_output, + encoder_attention_mask=encoder_att, + return_dict=True, + mode='fusion') + score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1] + score_matrix_t2i[i, batch_topk] = score + return score_matrix_t2i + + def _get_predictions(self, + result: torch.Tensor, + data_samples: List[ActionDataSample], + mode: str = 'i2t'): + """Post-process the output of retriever. + + Args: + result (torch.Tensor): Score matrix of single retrieve, + either from image or text. + data_samples (List[ActionDataSample], optional): The annotation + data of every samples. + mode (str): Retrieve mode, either `i2t` for image to text, or `t2i` + text to image. Defaults to `i2t`. + + Returns: + List[ActionDataSample]: the raw data_samples with + the predicted results. + """ + + # create data sample if not exists + if data_samples is None: + data_samples = [ActionDataSample() for _ in range(result.size(0))] + elif mode == 't2i': + # Process data samples to align with the num of texts. + new_data_samples = [] + for sample in data_samples: + if isinstance(sample.text, (list, tuple)): + texts = sample.text + else: + texts = [sample.text] + for i, text in enumerate(texts): + new_sample = ActionDataSample(text=text) + if 'gt_video_id' in sample: + new_sample.gt_label = sample.gt_video_id[i] + new_data_samples.append(new_sample) + assert len(new_data_samples) == result.size(0) + data_samples = new_data_samples + elif mode == 'i2t': + for sample in data_samples: + if 'gt_text_id' in sample: + sample.gt_label = sample.gt_text_id + else: + raise ValueError(f'Type {mode} is not supported.') + + for data_sample, score in zip(data_samples, result): + idx = score.argmax(keepdim=True).detach() + + data_sample.set_pred_score(score) + data_sample.set_pred_label(idx) + return data_samples diff --git a/mmaction/models/multimodal/vindlu/vindlu_ret_mc.py b/mmaction/models/multimodal/vindlu/vindlu_ret_mc.py new file mode 100644 index 0000000000..d701438bb7 --- /dev/null +++ b/mmaction/models/multimodal/vindlu/vindlu_ret_mc.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F +from einops import rearrange + +from mmaction.registry import MODELS +from .vindlu_ret import VindLURetrieval + + +@MODELS.register_module() +class VindLURetrievalMC(VindLURetrieval): + """VindLU VQA retrieval multiple choice. + + score_weight (float): Weight coefficient for itm_head score to compute the + choice score. similarity_weight (float): Weight coefficient for similarity + score to compute the choice score. 
+ """ + + def __init__(self, score_weight=0.7, similarity_weight=0.3, **kwargs): + kwargs.pop('text_decoder') + super().__init__(**kwargs) + self.score_weight = score_weight + self.similarity_weight = similarity_weight + + def predict(self, inputs, data_samples, **kwargs): + """Predict captions from a batch of inputs. + + Args: + images (torch.Tensor): The input images tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. Defaults to None. + **kwargs: Other keyword arguments accepted by the ``predict`` + + Returns: + List[ActionDataSample]: Return list of data samples. + """ + num_options_per_q = len(data_samples[0].caption_options) + for sample in data_samples: + sample.text = sample.caption_options + + output = self.extract_feat(inputs, data_samples) + + text_embeds = output['text_embeds'] + text_attn_mask = output['text_attn_mask'] + image_embeds = output['image_embeds'] + image_feat = output['image_feat'] + text_feat = output['text_feat'] + + # compute similarity between vision feat and caption feat + text_feat = rearrange( + text_feat, '(b n) c -> b c n', n=num_options_per_q) + sim = torch.matmul(image_feat.mean(1, keepdim=True), + text_feat).squeeze(1) / self.temp + sim = F.softmax(sim, dim=1).flatten() + + # cross-modal encode + encoder_output = image_embeds.repeat_interleave( + num_options_per_q, dim=0) + image_atts = torch.ones( + encoder_output.size()[:-1], dtype=torch.long).to(inputs.device) + output = self.text_encoder( + encoder_embeds=text_embeds, + attention_mask=text_attn_mask, + encoder_hidden_states=encoder_output, + encoder_attention_mask=image_atts, + return_dict=True, + mode='fusion', + ) + itm_embeds = output.last_hidden_state[:, 0] # [CLS] + + itm_score = F.softmax(self.itm_head(itm_embeds), dim=1)[:, 1] # [bs*5] + score = itm_score * self.score_weight + sim * self.similarity_weight + + pred_answers = score.view(-1, num_options_per_q).max(1)[1].cpu() + + # assemble predictions + ensemble_scores = score.view(-1, num_options_per_q).cpu() # (bsz, 5) + + out_data_samples = [] + for data_sample, ensemble_score, pred_ans in \ + zip(data_samples, ensemble_scores, pred_answers): + data_sample.pred_label = pred_ans.item() + data_sample.score = ensemble_score.numpy() + out_data_samples.append(data_sample) + + return out_data_samples diff --git a/mmaction/models/multimodal/vindlu/vindlu_vqa.py b/mmaction/models/multimodal/vindlu/vindlu_vqa.py new file mode 100644 index 0000000000..87233b9b21 --- /dev/null +++ b/mmaction/models/multimodal/vindlu/vindlu_vqa.py @@ -0,0 +1,266 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import mmengine +import torch +import torch.nn.functional as F +from einops import rearrange + +from mmaction.registry import MODELS +from .vindlu import VindLUBase + + +@MODELS.register_module() +class VindLUVQA(VindLUBase): + """VindLU VQA. + + Args: + text_decoder (dict): Backbone for extracting + multi-modal features. We apply this part as VQA fusion module. + answer_list_path (str, optional): Path to `answer_list.json`. + max_question_len (int): Max text length of question text. + Defaults to 25. + max_answer_len (int): Max text length of answer text. Defaults to 5. + num_ans_candidates (int): Number of answer candidates, used to filter + out answers with low probability. Defaults to 128. + **kwargs: Other keyword arguments accepted by the VindLUBase. 
+ """ + + def __init__(self, + text_decoder: dict, + answer_list_path: Optional[str] = None, + max_question_len: int = 25, + max_answer_len: int = 5, + num_ans_candidates: int = 128, + **kwargs): + super().__init__(**kwargs) + + self.max_question_len = max_question_len + self.max_answer_len = max_answer_len + self.num_ans_candidates = num_ans_candidates + self.answer_list_path = answer_list_path + self.text_decoder_cfg = text_decoder + + # for inference only + if answer_list_path: + self.answer_list = mmengine.load(answer_list_path) + + # delete extra/unnecessary modules inherited from VindLUBase + extra_attributes = ['vision_proj', 'text_proj', 'temp', 'itm_head'] + for attr in extra_attributes: + delattr(self, attr) + + self.text_decoder_cfg.gradient_checkpointing = \ + self.gradient_checkpointing + self.text_decoder = MODELS.build(self.text_decoder_cfg) + + def forward_encoder(self, inputs, data_samples): + # forward vision encoder + image_embeds, _ = self.encode_vision(inputs) + image_embeds = rearrange(image_embeds, 'b t l c -> b (t l) c') + image_atts = torch.ones( + image_embeds.size()[:-1], dtype=torch.long).to(inputs.device) + + # forward text encoder + questions = [sample.question for sample in data_samples] + questions = self.tokenizer( + questions, + padding='max_length', + truncation=True, + max_length=self.max_question_len, + return_tensors='pt').to(inputs.device) + + question_output = self.text_encoder( + questions.input_ids, + attention_mask=questions.attention_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True) + + return questions, question_output + + def loss(self, inputs, data_samples): + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (dict): A batch of inputs. The input tensor with of + at least one modality. For image, the value is a tensor + of shape (N, C, ...) in general. + For text, the value is a dict of tokenized text inputs. + data_samples (Optional[List[DataSample]]): + The annotation data of every samples. Defaults to None. 
+ + Returns: + Dict[str, torch.tensor]: a dictionary of loss components of + """ + + questions, question_output = self.forward_encoder(inputs, data_samples) + + weights = torch.cat( + [torch.tensor(sample.gt_answer_weight) for sample in data_samples], + dim=0).to(inputs.device) + raw_answers = [] + for sample in data_samples: + raw_answers.extend(sample.gt_answer) + answer_count = torch.tensor([ + len(sample.gt_answer) for sample in data_samples + ]).to(inputs.device) + answers = [a + ' ' + '[SEP]' for a in raw_answers] + answers = self.tokenizer( + answers, + padding='max_length', + truncation=True, + max_length=self.max_answer_len, + return_tensors='pt').to(inputs.device) + + answer_targets = answers.input_ids.masked_fill( + answers.input_ids == self.tokenizer.pad_token_id, -100) + + question_states = [] + question_atts = [] + for b, n in enumerate(answer_count): + question_states += [question_output.last_hidden_state[b]] * n + question_atts += [questions.attention_mask[b]] * n + question_states = torch.stack(question_states, 0).to(inputs.device) + question_atts = torch.stack(question_atts, 0).to(inputs.device) + + answer_output = self.text_decoder( + answers.input_ids, + attention_mask=answers.attention_mask, + encoder_hidden_states=question_states, + encoder_attention_mask=question_atts, + labels=answer_targets, + return_dict=True, + reduction='none', + ) + loss = weights * answer_output.loss + loss = loss.sum() / inputs.size(0) + + return dict(loss=loss) + + def predict(self, inputs, data_samples, **kwargs): + + questions, question_output = self.forward_encoder(inputs, data_samples) + + raw_answers = self.answer_list + answers = [a + ' ' + '[SEP]' for a in raw_answers] + answers = self.tokenizer( + answers, + padding='max_length', + truncation=True, + max_length=self.max_answer_len, + return_tensors='pt', + ).to(inputs.device) + + topk_ids, topk_probs = self.rank_answer( + question_output.last_hidden_state, questions.attention_mask, + answers.input_ids, answers.attention_mask, self.num_ans_candidates) + + out_data_samples = [] + for data_sample, topk_id, topk_prob in zip(data_samples, topk_ids, + topk_probs): + _, pred = topk_prob.max(dim=0) + data_sample.pred_answer = raw_answers[topk_id[pred]] + out_data_samples.append(data_sample) + + return out_data_samples + + def rank_answer(self, question_states, question_atts, answer_ids, + answer_atts, k): + """ + question_states: (bsz, Lq, d) + answer_ids: answer input id after tokenization, (#answers, La) + """ + num_ques = question_states.size(0) + start_ids = answer_ids[0, 0].repeat(num_ques, 1) # bos token + + start_output = self.text_decoder( + start_ids, + encoder_hidden_states=question_states, + encoder_attention_mask=question_atts, + return_dict=True, + reduction='none', + ) + logits = start_output.logits[:, 0, :] # first token's logit + + # topk_probs: top-k probability + # topk_ids: [num_question, k] + answer_first_token = answer_ids[:, 1] + prob_first_token = F.softmax( + logits, dim=1).index_select( + dim=1, index=answer_first_token) + topk_probs, topk_ids = prob_first_token.topk(k, dim=1) + + # answer input: [num_question*k, answer_len] + input_ids = [] + input_atts = [] + for b, topk_id in enumerate(topk_ids): + input_ids.append(answer_ids.index_select(dim=0, index=topk_id)) + input_atts.append(answer_atts.index_select(dim=0, index=topk_id)) + input_ids = torch.cat(input_ids, dim=0) + input_atts = torch.cat(input_atts, dim=0) + + targets_ids = input_ids.masked_fill( + input_ids == self.tokenizer.pad_token_id, -100) + + 
question_states = question_states.repeat_interleave(k, dim=0) + question_atts = question_atts.repeat_interleave(k, dim=0) + + output = self.text_decoder( + input_ids, + attention_mask=input_atts, + encoder_hidden_states=question_states, + encoder_attention_mask=question_atts, + labels=targets_ids, + return_dict=True, + reduction='none', + ) + + answer_loss = output.loss + answer_loss = answer_loss.view(input_ids.size(0), -1) + + # topk_prob: first token probability + topk_probs = topk_probs.view(-1, 1) + log_probs = torch.cat([topk_probs.log(), -answer_loss], dim=1) + + # re-calculate log probabilities for the answer sequences + # using chain rule + log_probs_sum = log_probs.sum(1) + log_probs_sum = log_probs_sum.view(num_ques, k) + + topk_probs = F.softmax(log_probs_sum, dim=-1) + # get top-k after re-ranking + topk_probs, rerank_id = topk_probs.topk(k, dim=1) + topk_ids = torch.gather(topk_ids, 1, rerank_id) + + return topk_ids, topk_probs + + def preprocess_state_dict(self, state_dict): + """Preprocess pretrained checkpoint for text_encoder and + text_decoder.""" + for key in list(state_dict.keys()): + if 'bert' in key: + encoder_key = key.replace('bert.', '') + state_dict[encoder_key] = state_dict[key] + + # init text decoder as multimodal encoder + # (last 6 layers of model.text_encoder) + # only for generation tasks like VQA + if self.text_decoder_cfg and 'text_encoder' in key: + if 'layer' in key: + encoder_keys = key.split('.') + layer_num = int(encoder_keys[4]) + if layer_num < self.text_encoder_cfg.fusion_layer: + del state_dict[key] + continue + else: + decoder_layer_num = layer_num - 9 + encoder_keys[4] = str(decoder_layer_num) + encoder_key = '.'.join(encoder_keys) + else: + encoder_key = key + decoder_key = encoder_key.replace('text_encoder', + 'text_decoder') + state_dict[decoder_key] = state_dict[key] + del state_dict[key] + return state_dict diff --git a/mmaction/models/multimodal/vindlu/xbert.py b/mmaction/models/multimodal/vindlu/xbert.py new file mode 100644 index 0000000000..df020ce535 --- /dev/null +++ b/mmaction/models/multimodal/vindlu/xbert.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
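+# Thin, config-buildable wrappers around the VindLU BERT variants
+# (masked-LM, encoder and LM-head decoder). Each wrapper loads a BertConfig
+# from a pretrained checkpoint name, injects ``fusion_layer`` and
+# ``encoder_width``, and then defers to the parent constructor.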
+from mmaction.registry import MODELS +from .modeling_bert import (BertConfig, BertForMaskedLM, BertLMHeadModel, + BertModel) + + +@MODELS.register_module() +class XBertForMaskedLM(BertForMaskedLM): + + def __init__(self, pretrained_model_name_or_path, fusion_layer, + encoder_width, **kwargs): + config = BertConfig.from_pretrained(pretrained_model_name_or_path) + config.fusion_layer = fusion_layer + config.encoder_width = encoder_width + config.update(kwargs) + super().__init__(config) + + +@MODELS.register_module() +class XBertModel(BertModel): + + def __init__(self, pretrained_model_name_or_path, fusion_layer, + encoder_width, add_pooling_layer, **kwargs): + config = BertConfig.from_pretrained(pretrained_model_name_or_path) + config.fusion_layer = fusion_layer + config.encoder_width = encoder_width + config.update(kwargs) + super().__init__(config, add_pooling_layer) + + +@MODELS.register_module() +class BertDecoder(BertLMHeadModel): + + def __init__(self, pretrained_model_name_or_path, fusion_layer, + encoder_width, **kwargs): + config = BertConfig.from_pretrained(pretrained_model_name_or_path) + config.fusion_layer = fusion_layer + config.encoder_width = encoder_width + config.update(kwargs) + super().__init__(config) diff --git a/mmaction/registry.py b/mmaction/registry.py index 6d7d831db1..f214d514e5 100644 --- a/mmaction/registry.py +++ b/mmaction/registry.py @@ -54,7 +54,7 @@ DATA_SAMPLERS = Registry( 'data sampler', parent=MMENGINE_DATA_SAMPLERS, - locations=['mmaction.engine']) + locations=['mmaction.datasets']) TRANSFORMS = Registry( 'transform', parent=MMENGINE_TRANSFORMS, @@ -132,3 +132,9 @@ # manage function FUNCTION = Registry( 'function', parent=MMENGINE_FUNCTION, locations=['mmaction.mmengine']) + +# Tokenizer to encode sequence +TOKENIZER = Registry( + 'tokenizer', + locations=['mmaction.models'], +) diff --git a/mmaction/utils/__init__.py b/mmaction/utils/__init__.py index af91d382c4..54e78dd2b6 100644 --- a/mmaction/utils/__init__.py +++ b/mmaction/utils/__init__.py @@ -3,17 +3,12 @@ from .gradcam_utils import GradCAM from .misc import (VideoWriter, frame_extract, get_random_string, get_shm_dir, get_str_type, get_thread_id) +from .progress import track, track_on_main_process from .setup_env import register_all_modules from .typing_utils import * # noqa: F401,F403 __all__ = [ - 'collect_env', - 'get_random_string', - 'get_thread_id', - 'get_shm_dir', - 'frame_extract', - 'GradCAM', - 'register_all_modules', - 'VideoWriter', - 'get_str_type', + 'collect_env', 'get_random_string', 'get_thread_id', 'get_shm_dir', + 'frame_extract', 'GradCAM', 'register_all_modules', 'VideoWriter', + 'get_str_type', 'track', 'track_on_main_process' ] diff --git a/mmaction/utils/dependency.py b/mmaction/utils/dependency.py new file mode 100644 index 0000000000..dd8df115ec --- /dev/null +++ b/mmaction/utils/dependency.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
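+# Helpers for optional third-party dependencies: parse a requirement string
+# such as ``transformers>=4.28.0``, check whether a matching version is
+# installed, wrap functions so they raise an informative ImportError with an
+# install hint when the package is missing, and register placeholder modules
+# for the multimodal extras.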
+import re +from functools import wraps +from inspect import isfunction + +from importlib_metadata import PackageNotFoundError, distribution +from mmengine.utils import digit_version + + +def satisfy_requirement(dep): + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, dep, maxsplit=1) + parts = [p.strip() for p in parts] + package = parts[0] + if len(parts) > 1: + op, version = parts[1:] + op = { + '>=': '__ge__', + '==': '__eq__', + '>': '__gt__', + '<': '__lt__', + '<=': '__le__' + }[op] + else: + op, version = None, None + + try: + dist = distribution(package) + if op is None or getattr(digit_version(dist.version), op)( + digit_version(version)): + return True + except PackageNotFoundError: + pass + + return False + + +def require(dep, install=None): + """A wrapper of function for extra package requirements. + + Args: + dep (str): The dependency package name, like ``transformers`` + or ``transformers>=4.28.0``. + install (str, optional): The installation command hint. Defaults + to None, which means to use "pip install dep". + """ + + def wrapper(fn): + assert isfunction(fn) + + @wraps(fn) + def ask_install(*args, **kwargs): + name = fn.__qualname__.replace('.__init__', '') + ins = install or f'pip install "{dep}"' + raise ImportError( + f'{name} requires {dep}, please install it by `{ins}`.') + + if satisfy_requirement(dep): + fn._verify_require = getattr(fn, '_verify_require', lambda: None) + return fn + + ask_install._verify_require = ask_install + return ask_install + + return wrapper + + +WITH_MULTIMODAL = all( + satisfy_requirement(item) for item in ['transformers>=4.28.0']) + + +def register_multimodal_placeholder(names, registry): + for name in names: + + def ask_install(*args, **kwargs): + raise ImportError( + f'{name} requires extra multi-modal dependencies, please ' + 'install it by `pip install "mmaction2[multimodal]"` ' + 'or `pip install -e ".[multimodal]"`.') + + registry.register_module(name=name, module=ask_install) diff --git a/mmaction/utils/progress.py b/mmaction/utils/progress.py new file mode 100644 index 0000000000..b23f976a42 --- /dev/null +++ b/mmaction/utils/progress.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
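+# Rich-based progress-bar helpers. ``track`` renders a shared live progress
+# bar over a sequence, while ``track_on_main_process`` only shows the bar on
+# the main distributed rank and yields the sequence unchanged elsewhere.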
+from typing import Optional + +import mmengine.dist as dist +import rich.progress as progress +from rich.live import Live + +disable_progress_bar = False +global_progress = progress.Progress( + '{task.description}', + progress.BarColumn(), + progress.TaskProgressColumn(show_speed=True), + progress.TimeRemainingColumn(), +) +global_live = Live(global_progress, refresh_per_second=10) + + +def track(sequence, description: str = '', total: Optional[float] = None): + if disable_progress_bar: + yield from sequence + else: + global_live.start() + task_id = global_progress.add_task(description, total=total) + task = global_progress._tasks[task_id] + try: + yield from global_progress.track(sequence, task_id=task_id) + finally: + if task.total is None: + global_progress.update(task_id, total=task.completed) + if all(task.finished for task in global_progress.tasks): + global_live.stop() + for task_id in global_progress.task_ids: + global_progress.remove_task(task_id) + + +def track_on_main_process(sequence, description='', total=None): + if not dist.is_main_process() or disable_progress_bar: + yield from sequence + else: + yield from track(sequence, total=total, description=description) diff --git a/requirements/multimodal.txt b/requirements/multimodal.txt new file mode 100644 index 0000000000..c3503a0875 --- /dev/null +++ b/requirements/multimodal.txt @@ -0,0 +1 @@ +transformers>=4.28.0 diff --git a/setup.py b/setup.py index 4776e54145..94471e5220 100644 --- a/setup.py +++ b/setup.py @@ -191,5 +191,6 @@ def add_mim_extension(): 'tests': parse_requirements('requirements/tests.txt'), 'optional': parse_requirements('requirements/optional.txt'), 'mim': parse_requirements('requirements/mminstall.txt'), + 'multimodal': parse_requirements('requirements/multimodal.txt'), }, zip_safe=False) diff --git a/tests/evaluation/metrics/test_retrieval_metric.py b/tests/evaluation/metrics/test_retrieval_metric.py index cb1f1c72ba..fffc0dbacc 100644 --- a/tests/evaluation/metrics/test_retrieval_metric.py +++ b/tests/evaluation/metrics/test_retrieval_metric.py @@ -1,8 +1,13 @@ # Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np import pytest import torch -from mmaction.evaluation.metrics import RetrievalMetric +from mmaction.evaluation.metrics import RetrievalMetric, RetrievalRecall +from mmaction.registry import METRICS +from mmaction.structures import ActionDataSample def generate_data(num_samples=5, feat_dim=10, random_label=False): @@ -47,3 +52,114 @@ def test_acc_metric(): assert eval_results['R1'] == eval_results['R5'] == eval_results[ 'R10'] == 100.0 assert eval_results['MdR'] == eval_results['MnR'] == 1.0 + + +class TestRetrievalRecall(TestCase): + + def test_evaluate(self): + """Test using the metric in the same way as Evalutor.""" + pred = [ + ActionDataSample().set_pred_score(i).set_gt_label(k).to_dict() + for i, k in zip([ + torch.tensor([0.7, 0.0, 0.3]), + torch.tensor([0.5, 0.2, 0.3]), + torch.tensor([0.4, 0.5, 0.1]), + torch.tensor([0.0, 0.0, 1.0]), + torch.tensor([0.0, 0.0, 1.0]), + torch.tensor([0.0, 0.0, 1.0]), + ], [[0], [0], [1], [2], [2], [0]]) + ] + + # Test with score (use score instead of label if score exists) + metric = METRICS.build(dict(type='RetrievalRecall', topk=1)) + metric.process(None, pred) + recall = metric.evaluate(6) + self.assertIsInstance(recall, dict) + self.assertAlmostEqual( + recall['retrieval/Recall@1'], 5 / 6 * 100, places=4) + + # Test with invalid topk + with self.assertRaisesRegex(RuntimeError, 'selected index k'): + metric = METRICS.build(dict(type='RetrievalRecall', topk=10)) + metric.process(None, pred) + metric.evaluate(6) + + with self.assertRaisesRegex(ValueError, '`topk` must be a'): + METRICS.build(dict(type='RetrievalRecall', topk=-1)) + + # Test initialization + metric = METRICS.build(dict(type='RetrievalRecall', topk=5)) + self.assertEqual(metric.topk, (5, )) + + # Test initialization + metric = METRICS.build(dict(type='RetrievalRecall', topk=(1, 2, 5))) + self.assertEqual(metric.topk, (1, 2, 5)) + + def test_calculate(self): + """Test using the metric from static method.""" + + # seq of indices format + y_true = [[0, 2, 5, 8, 9], [1, 4, 6]] + y_pred = [np.arange(10)] * 2 + + # test with average is 'macro' + recall_score = RetrievalRecall.calculate( + y_pred, y_true, topk=1, pred_indices=True, target_indices=True) + expect_recall = 50. + self.assertEqual(recall_score[0].item(), expect_recall) + + # test with tensor input + y_true = torch.Tensor([[1, 0, 1, 0, 0, 1, 0, 0, 1, 1], + [0, 1, 0, 0, 1, 0, 1, 0, 0, 0]]) + y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2) + recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=1) + expect_recall = 50. + self.assertEqual(recall_score[0].item(), expect_recall) + + # test with topk is 5 + y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2) + recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=2) + expect_recall = 100. + self.assertEqual(recall_score[0].item(), expect_recall) + + # test with topk is (1, 5) + y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2) + recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=(1, 5)) + expect_recalls = [50., 100.] 
+ self.assertEqual(len(recall_score), len(expect_recalls)) + for i in range(len(expect_recalls)): + self.assertEqual(recall_score[i].item(), expect_recalls[i]) + + # Test with invalid pred + y_pred = dict() + y_true = [[0, 2, 5, 8, 9], [1, 4, 6]] + with self.assertRaisesRegex(AssertionError, '`pred` must be Seq'): + RetrievalRecall.calculate(y_pred, y_true, True, True) + + # Test with invalid target + y_true = dict() + y_pred = [np.arange(10)] * 2 + with self.assertRaisesRegex(AssertionError, '`target` must be Seq'): + RetrievalRecall.calculate( + y_pred, y_true, topk=1, pred_indices=True, target_indices=True) + + # Test with different length `pred` with `target` + y_true = [[0, 2, 5, 8, 9], [1, 4, 6]] + y_pred = [np.arange(10)] * 3 + with self.assertRaisesRegex(AssertionError, 'Length of `pred`'): + RetrievalRecall.calculate( + y_pred, y_true, topk=1, pred_indices=True, target_indices=True) + + # Test with invalid pred + y_true = [[0, 2, 5, 8, 9], dict()] + y_pred = [np.arange(10)] * 2 + with self.assertRaisesRegex(AssertionError, '`target` should be'): + RetrievalRecall.calculate( + y_pred, y_true, topk=1, pred_indices=True, target_indices=True) + + # Test with invalid target + y_true = [[0, 2, 5, 8, 9], [1, 4, 6]] + y_pred = [np.arange(10), dict()] + with self.assertRaisesRegex(AssertionError, '`pred` should be'): + RetrievalRecall.calculate( + y_pred, y_true, topk=1, pred_indices=True, target_indices=True) diff --git a/tools/data/msrvtt/README.md b/tools/data/msrvtt/README.md new file mode 100644 index 0000000000..e9e72ad6b4 --- /dev/null +++ b/tools/data/msrvtt/README.md @@ -0,0 +1,68 @@ +# Preparing MSR-VTT Retrieval/ Video Question-Answering Dataset + +## Introduction + + + +```BibTeX +@inproceedings{xu2016msr, + title={Msr-vtt: A large video description dataset for bridging video and language}, + author={Xu, Jun and Mei, Tao and Yao, Ting and Rui, Yong}, + booktitle={CVPR}, + pages={5288--5296}, + year={2016} +} +``` + +Before preparing the dataset, please make sure that the directory is located at `$MMACTION2/tools/data/msrvtt/`. + +## Step 1. Download Annotation Files + +You can directly download the following annotation files related to MSR-VTT from the [Google Drive link](https://drive.google.com/drive/folders/12cr94wT8j7pR09AR2nmQg6o26Y1arI50) provided by [VindLU](https://github.com/klauscc) and place them in the `$MMACTION2/tools/data/msrvtt/annotations` directory: + +- [msrvtt_qa_train.json](https://drive.google.com/file/d/12dJq5_7v8FytrJwrPB_f22tET1MmGCNh/view?usp=drive_link) +- [msrvtt_qa_val.json](https://drive.google.com/file/d/138q-A-V8fCC2nBYJgqkQa3gBfXVNbNNd/view?usp=drive_link) +- [msrvtt_qa_test.json](https://drive.google.com/file/d/13IiEcUMHiNppWhGwVY1eAaip6iSJM35A/view?usp=drive_link) +- [msrvtt_qa_answer_list.json](https://drive.google.com/file/d/131euz_dssRkDTk3-ioAS5ZsvIxS_Tt4M/view?usp=drive_link) +- [msrvtt_mc_test.json](https://drive.google.com/file/d/13FrUQ2ZDsNDraP7lfnKvTArPIgdtHuLC/view?usp=drive_link) +- [msrvtt_ret_train9k.json](https://drive.google.com/file/d/13OVo0XRdVWTHlFFxbKg3daYCHsMbJxyd/view?usp=drive_link) +- [msrvtt_ret_train7k.json](https://drive.google.com/file/d/13ID97BX4ExO6mWPIUMp-GzXcPBkviSLx/view?usp=drive_link) +- [msrvtt_ret_test1k.json](https://drive.google.com/file/d/13FLrjI-aleKeU7LbJMDrYgktX7MbTbzu/view?usp=drive_link) +- [msrvtt_test1k.json](https://drive.google.com/file/d/12z6y-DNwIfICSzOhekbJwSbf7z2hlibE/view?usp=drive_link) + +## Step 2. 
Prepare Video Data + +You can refer to the [official website](https://www.microsoft.com/en-us/research/publication/msr-vtt-a-large-video-description-dataset-for-bridging-video-and-language/) of this dataset for basic information. Run the following commands to prepare the MSRVTT video files: + +```shell +# Download original videos +bash download_msrvtt.sh +# Preprocess videos to lower FPS and dimensions +bash compress_msrvtt.sh +``` + +After completing the above preparation steps, the directory structure will be as follows: + +``` +mmaction2 +โ”œโ”€โ”€ mmaction +โ”œโ”€โ”€ tools +โ”œโ”€โ”€ configs +โ”œโ”€โ”€ data +โ”‚ โ””โ”€โ”€ msrvtt +โ”‚ โ”‚ โ”œโ”€โ”€ annotations +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_qa_train.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_qa_val.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_qa_test.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_qa_answer_list.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_mc_test.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_ret_train9k.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_ret_train7k.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_ret_test1k.json +โ”‚ โ”‚ โ”‚ โ””โ”€โ”€ msrvtt_test1k.json +โ”‚ โ”‚ โ””โ”€โ”€ videos_2fps_224 +โ”‚ โ”‚ โ”œโ”€โ”€ video0.mp4 +โ”‚ โ”‚ โ”œโ”€โ”€ video1.mp4 +โ”‚ โ”‚ โ”œโ”€โ”€ ... +โ”‚ โ”‚ โ””โ”€โ”€ video9999.mp4 +``` diff --git a/tools/data/msrvtt/README_zh-CN.md b/tools/data/msrvtt/README_zh-CN.md new file mode 100644 index 0000000000..bbd3a009c4 --- /dev/null +++ b/tools/data/msrvtt/README_zh-CN.md @@ -0,0 +1,68 @@ +# ๅ‡†ๅค‡ MSR-VTT ๆฃ€็ดข/่ง†้ข‘้—ฎ็ญ”ๆ•ฐๆฎ้›† + +## ็ฎ€ไป‹ + + + +```BibTeX +@inproceedings{xu2016msr, + title={Msr-vtt: A large video description dataset for bridging video and language}, + author={Xu, Jun and Mei, Tao and Yao, Ting and Rui, Yong}, + booktitle={CVPR}, + pages={5288--5296}, + year={2016} +} +``` + +ๅœจๆ•ฐๆฎ้›†ๅ‡†ๅค‡ๅ‰๏ผŒ่ฏท็กฎไฟๅ‘ฝไปค่กŒๅฝ“ๅ‰่ทฏๅพ„ไธบ `$MMACTION2/tools/data/msrvtt/`ใ€‚ + +## ๆญฅ้ชค 1. ไธ‹่ฝฝๆ ‡ๆณจๆ–‡ไปถ + +็”จๆˆทๅฏไปŽ [VindLU](https://github.com/klauscc/VindLU) ๆไพ›็š„ [Google Drive ้“พๆŽฅ](https://drive.google.com/drive/folders/12cr94wT8j7pR09AR2nmQg6o26Y1arI50)ไธญ็›ดๆŽฅไธ‹่ฝฝไปฅไธ‹ไธŽ MSR-VTT ็›ธๅ…ณ็š„ๆ ‡ๆณจๆ–‡ไปถ, ๅนถๆ”พ็ฝฎๅˆฐ `$MMACTION2/tools/data/msrvtt/annotations` ่ทฏๅพ„ไธ‹: + +- [msrvtt_qa_train.json](https://drive.google.com/file/d/12dJq5_7v8FytrJwrPB_f22tET1MmGCNh/view?usp=drive_link) +- [msrvtt_qa_val.json](https://drive.google.com/file/d/138q-A-V8fCC2nBYJgqkQa3gBfXVNbNNd/view?usp=drive_link) +- [msrvtt_qa_test.json](https://drive.google.com/file/d/13IiEcUMHiNppWhGwVY1eAaip6iSJM35A/view?usp=drive_link) +- [msrvtt_qa_answer_list.json](https://drive.google.com/file/d/131euz_dssRkDTk3-ioAS5ZsvIxS_Tt4M/view?usp=drive_link) +- [msrvtt_mc_test.json](https://drive.google.com/file/d/13FrUQ2ZDsNDraP7lfnKvTArPIgdtHuLC/view?usp=drive_link) +- [msrvtt_ret_train9k.json](https://drive.google.com/file/d/13OVo0XRdVWTHlFFxbKg3daYCHsMbJxyd/view?usp=drive_link) +- [msrvtt_ret_train7k.json](https://drive.google.com/file/d/13ID97BX4ExO6mWPIUMp-GzXcPBkviSLx/view?usp=drive_link) +- [msrvtt_ret_test1k.json](https://drive.google.com/file/d/13FLrjI-aleKeU7LbJMDrYgktX7MbTbzu/view?usp=drive_link) +- [msrvtt_test1k.json](https://drive.google.com/file/d/12z6y-DNwIfICSzOhekbJwSbf7z2hlibE/view?usp=drive_link) + +## ๆญฅ้ชค 2. 
ๅ‡†ๅค‡่ง†้ข‘ๆ•ฐๆฎ + +็”จๆˆทๅฏๅ‚่€ƒ่ฏฅๆ•ฐๆฎ้›†็š„[ๅฎ˜็ฝ‘](https://www.microsoft.com/en-us/research/publication/msr-vtt-a-large-video-description-dataset-for-bridging-video-and-language/)๏ผŒไปฅ่Žทๅ–ๆ•ฐๆฎ้›†็›ธๅ…ณ็š„ๅŸบๆœฌไฟกๆฏใ€‚่ฟ่กŒไธ‹้ข็š„ๅ‘ฝไปคๅ‡†ๅค‡ MSRVTT ่ง†้ข‘ๆ–‡ไปถ: + +```shell +# download original videos +bash download_msrvtt.sh +# preprocess videos to lower FPS and dimension +bash compress_msrvtt.sh +``` + +ๅฎŒๆˆไธŠ่ฟฐๅ‡†ๅค‡ๆญฅ้ชคๅŽ๏ผŒๆ–‡ไปถ็›ฎๅฝ•ๅฆ‚ไธ‹๏ผš + +``` +mmaction2 +โ”œโ”€โ”€ mmaction +โ”œโ”€โ”€ tools +โ”œโ”€โ”€ configs +โ”œโ”€โ”€ data +โ”‚ โ””โ”€โ”€ msrvtt +โ”‚ โ”‚ โ”œโ”€โ”€ annotations +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_qa_train.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_qa_val.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_qa_test.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_qa_answer_list.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_mc_test.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_ret_train9k.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_ret_train7k.json +โ”‚ โ”‚ โ”‚ โ”œโ”€โ”€ msrvtt_ret_test1k.json +โ”‚ โ”‚ โ”‚ โ””โ”€โ”€ msrvtt_test1k.json +โ”‚ โ”‚ โ””โ”€โ”€ videos_2fps_224 +โ”‚ โ”‚ โ”œโ”€โ”€ video0.mp4 +โ”‚ โ”‚ โ”œโ”€โ”€ video1.mp4 +โ”‚ โ”‚ โ”œโ”€โ”€ ... +โ”‚ โ”‚ โ””โ”€โ”€ video9999.mp4 +``` diff --git a/tools/data/msrvtt/compress.py b/tools/data/msrvtt/compress.py new file mode 100644 index 0000000000..48f022ddba --- /dev/null +++ b/tools/data/msrvtt/compress.py @@ -0,0 +1,192 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Used to compress videos (FPS and dimensions) in the Singularity project. + +copied from https://github.com/klauscc/VindLU +""" +import argparse +import os +import shutil +import subprocess +from multiprocessing import Pool +from os.path import exists, join +from pathlib import Path + +try: + from psutil import cpu_count +except ImportError: + from multiprocessing import cpu_count + +from functools import partial + +from PIL import Image +from tqdm import tqdm + + +def resize_image(input_path, output_path, size=224): + with Image.open(input_path) as img: + w, h = img.width, img.height + r = 1. * w / h + if w > h: + h = size + w = r * size + else: + h = size / r + w = size + + img_resized = img.resize((int(w), int(h))) + img_resized.save(output_path) + + +def _compress_images(input_output_pair, size=224): + """Scale and downsample an input image to a given fps and size (shorter + side size). + + This also removes the audio from the image. + """ + input_image_path, output_image_path = input_output_pair + try: + resize_image(input_image_path, output_image_path, size) + except Exception as e: + print(f'Caught Exception {e}') + + +def _compress_videos(input_output_pair, size=224, fps=3): + """Scale and downsample an input video to a given fps and size (shorter + side size). + + This also removes the audio from the video. 
+ """ + input_file_path, output_file_path = input_output_pair + try: + command = [ + 'ffmpeg', + '-y', # (optional) overwrite output file if it exists + '-i', + input_file_path, + '-filter:v', # no audio + f"scale='if(gt(a,1),trunc(oh*a/2)*2,{size})':'if(gt(a,1),{size},trunc(ow*a/2)*2)'", # noqa: E501 + '-map', + '0:v', # no audio + '-r', + str(fps), # frames per second + # '-g', str(16), + output_file_path, + ] + subprocess.run( + command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + except Exception as e: + raise e + + +def _compress(input_output_pair, fps=3, size=224, file_type='image'): + if file_type == 'image': + _compress_images(input_output_pair, size) + elif file_type == 'video': + _compress_videos(input_output_pair, size, fps) + + +def prepare_input_output_pairs(input_root, + output_root, + input_file_list_path=None): + # filename list in `input_file_list_path` can be created very fast using `ls -U . >> ../video_filenames.txt` # noqa: E501 + if input_file_list_path: + with open(input_file_list_path, 'r') as f: + filenames = [s.strip() for s in f.readlines()] + else: + filenames = [ + video_path.name for video_path in Path(input_root).glob('*.mp4') + ] + print(f'There are {len(filenames)} video/images files loaded from list.') + input_file_path_list = [] + output_file_path_list = [] + for e in tqdm(filenames, desc='find un-processed videos/images'): + input_file_path = join(input_root, e) + output_file_path = join(output_root, e) + if not exists(output_file_path): + input_file_path_list.append(input_file_path) + output_file_path_list.append(output_file_path) + return input_file_path_list, output_file_path_list + + +def run_compress(): + parser = argparse.ArgumentParser( + description='Compress videos/images for speed-up') + parser.add_argument( + '--input_root', type=str, help='input root', required=True) + parser.add_argument( + '--input_file_list_path', + type=str, + default=None, + help='list of video filenames under args.input_root, it can be ' + 'created efficiently with `ls -U /path/to/video >> /path/to/video_filenames.txt`' # noqa: E501 + ) + parser.add_argument( + '--output_root', type=str, help='output root', required=True) + parser.add_argument( + '--size', + type=int, + default=224, + help='shorter side size, aspect ratio is kept') + parser.add_argument('--num_workers', type=int, default=24, help='#workers') + parser.add_argument( + '--fps', + type=int, + default=3, + help='fps for output video, ignored if file_type == image') + parser.add_argument( + '--file_type', + type=str, + choices=['image', 'video'], + help='input file type') + args = parser.parse_args() + + # set paths + input_root = args.input_root + output_root = args.output_root + assert input_root != output_root + if not exists(output_root): + os.makedirs(output_root, exist_ok=True) + + # prepare and find un-processed + input_file_path_list, output_file_path_list = prepare_input_output_pairs( + input_root, + output_root, + input_file_list_path=args.input_file_list_path, + ) + print(f'input_file_path_list[:3] {input_file_path_list[:3]}') + print(f'output_file_path_list[:3] {output_file_path_list[:3]}') + print('Total videos/images need to process: {}'.format( + len(input_file_path_list))) + + # start parallel jobs + num_cores = cpu_count() + num_workers = args.num_workers + print( + f'Begin with {num_cores}-core logical processor, {num_workers} workers' + ) + compress = partial( + _compress, fps=args.fps, size=args.size, file_type=args.file_type) + input_pairs = list(zip(input_file_path_list, 
output_file_path_list)) + with Pool(num_workers) as pool, tqdm( + total=len(input_file_path_list), + desc='re-encoding videos/images') as pbar: + for idx, _ in enumerate( + pool.imap_unordered(compress, input_pairs, chunksize=32)): + pbar.update(1) + + # copy-paste failed files + print('Compress finished, copy-paste failed files...') + copy_count = 0 + for input_file_path, output_file_path in zip(input_file_path_list, + output_file_path_list): + if exists(input_file_path): + if exists(output_file_path) is False or os.path.getsize( + output_file_path) < 1.: + copy_count += 1 + shutil.copyfile(input_file_path, output_file_path) + print('Copy and replace file: {}'.format(output_file_path)) + print(f'copy_count {copy_count}') + + +if __name__ == '__main__': + run_compress() diff --git a/tools/data/msrvtt/compress_msrvtt.sh b/tools/data/msrvtt/compress_msrvtt.sh new file mode 100644 index 0000000000..18822ce312 --- /dev/null +++ b/tools/data/msrvtt/compress_msrvtt.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +FPS=2 +SIZE=224 +DATA_DIR="../../../data/msrvtt/videos" +OUT_DIR="../../../data/msrvtt/videos_2fps_224" + +python compress.py \ + --input_root=${DATA_DIR} --output_root=${OUT_DIR} \ + --fps=${FPS} --size=${SIZE} --file_type=video --num_workers 24 diff --git a/tools/data/msrvtt/download_msrvtt.sh b/tools/data/msrvtt/download_msrvtt.sh new file mode 100644 index 0000000000..6ae40d942d --- /dev/null +++ b/tools/data/msrvtt/download_msrvtt.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +DATA_DIR="../../../data/msrvtt" +mkdir -p ${DATA_DIR} + +if [ -f "MSRVTT.zip" ]; then + echo "MSRVTT.zip exists, skip downloading!" +else + echo "Downloading MSRVTT.zip." + wget https://www.robots.ox.ac.uk/~maxbain/frozen-in-time/data/MSRVTT.zip +fi + +echo "Processing videos started." +unzip -q MSRVTT.zip -d ${DATA_DIR} +mkdir -p "${DATA_DIR}/videos/" && find "${DATA_DIR}/MSRVTT/videos/all" -name "video*.mp4" -exec mv {} "${DATA_DIR}/videos/" \; +echo "Processing videos completed." + +rm -rf "${DATA_DIR}/MSRVTT" +rm -rf "${DATA_DIR}/msrvtt_data" +rm msrvtt_data.zip +rm MSRVTT.zip +echo "The preparation of the msrvtt dataset has been successfully completed." 
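A quick sanity check after the two scripts finish is to count the re-encoded clips and load one of the annotation files. The snippet below is only a sketch: it assumes the default `data/msrvtt` layout shown in the directory trees above, and it makes no assumption about the annotation schema beyond the files being valid JSON.

```python
import json
from pathlib import Path

data_root = Path('data/msrvtt')  # adjust if you used a different layout

# Count the clips produced by compress_msrvtt.sh; the directory tree above
# lists video0.mp4 .. video9999.mp4, i.e. 10000 files are expected.
num_videos = len(list((data_root / 'videos_2fps_224').glob('video*.mp4')))
print(f'found {num_videos} compressed videos')

# Load one annotation file and report only its top-level structure,
# since the exact schema is not documented in this README.
ann_file = data_root / 'annotations' / 'msrvtt_ret_test1k.json'
ann = json.loads(ann_file.read_text())
print(type(ann).__name__, len(ann))
```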
diff --git a/tools/data/video_retrieval/README_zh-CN.md b/tools/data/video_retrieval/README_zh-CN.md index 1d4374daea..1814ff36e2 100644 --- a/tools/data/video_retrieval/README_zh-CN.md +++ b/tools/data/video_retrieval/README_zh-CN.md @@ -34,7 +34,7 @@ bash prepare_msrvtt.sh ``` -ๅฎŒๅœบไธŠ่ฟฐๅ‡†ๅค‡ๆญฅ้ชคๅŽ๏ผŒๆ–‡ไปถ็›ฎๅฝ•ๅฆ‚ไธ‹๏ผš +ๅฎŒๆˆไธŠ่ฟฐๅ‡†ๅค‡ๆญฅ้ชคๅŽ๏ผŒๆ–‡ไปถ็›ฎๅฝ•ๅฆ‚ไธ‹๏ผš ``` mmaction2 From 17b88a327f7089c64e217dfd10d2d6ab1652de9d Mon Sep 17 00:00:00 2001 From: wxDai Date: Thu, 12 Oct 2023 11:53:31 +0800 Subject: [PATCH 20/24] [Enhance] Support the Training of ActionClip (#2620) --- projects/actionclip/README.md | 34 +++- ...6-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py | 162 ++++++++++++++++++ ...2-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py | 162 ++++++++++++++++++ projects/actionclip/models/actionclip.py | 105 +++++++++--- 4 files changed, 437 insertions(+), 26 deletions(-) create mode 100644 projects/actionclip/configs/actionclip_vit-base-p16-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py create mode 100644 projects/actionclip/configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py diff --git a/projects/actionclip/README.md b/projects/actionclip/README.md index df694fd538..ffe14a4cae 100644 --- a/projects/actionclip/README.md +++ b/projects/actionclip/README.md @@ -46,24 +46,45 @@ Create a symbolic link from `$MMACTION2/data` to `./data` in the current directo ln -s ../../data ./data ``` +### Training commands + +**To train with single GPU:** + +```bash +mim train mmaction configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py +``` + +**To train with multiple GPUs:** + +```bash +mim train mmaction configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py --launcher pytorch --gpus 8 +``` + +**To train with multiple GPUs by slurm:** + +```bash +mim train mmaction configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py --launcher slurm \ + --gpus 8 --gpus-per-node 8 --partition $PARTITION +``` + ### Testing commands **To test with single GPU:** ```bash -mim test mmaction configs/actionclip_vit-base-p32-res224-clip-pre_1x1x8_k400-rgb.py --checkpoint $CHECKPOINT +mim test mmaction configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py --checkpoint $CHECKPOINT ``` **To test with multiple GPUs:** ```bash -mim test mmaction configs/actionclip_vit-base-p32-res224-clip-pre_1x1x8_k400-rgb.py --checkpoint $CHECKPOINT --launcher pytorch --gpus 8 +mim test mmaction configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py --checkpoint $CHECKPOINT --launcher pytorch --gpus 8 ``` **To test with multiple GPUs by slurm:** ```bash -mim test mmaction configs/actionclip_vit-base-p32-res224-clip-pre_1x1x8_k400-rgb.py --checkpoint $CHECKPOINT --launcher slurm \ +mim test mmaction configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py --checkpoint $CHECKPOINT --launcher slurm \ --gpus 8 --gpus-per-node 8 --partition $PARTITION ``` @@ -80,6 +101,13 @@ mim test mmaction configs/actionclip_vit-base-p32-res224-clip-pre_1x1x8_k400-rgb \[1\] The models are ported from the repo [ActionCLIP](https://github.com/sallymmx/ActionCLIP) and tested on our data. Currently, we only support the testing of ActionCLIP models. Due to the variation in testing data, our reported test accuracy differs from that of the original repository (on average, it is lower by one point). Please refer to this [issue](https://github.com/sallymmx/ActionCLIP/issues/14) for more details. 
+### Kinetics400 (Trained on Our K400 dataset) + +| frame sampling strategy | gpus | backbone | top1 acc | top5 acc | testing protocol | config | ckpt | log | +| :---------------------: | :--: | :------: | :------: | :------: | :---------------: | :-------------------------------------------: | :------------------------------------------: | :-----------------------------------------: | +| 1x1x8 | 8 | ViT-B/32 | 77.5 | 93.2 | 8 clips x 1 crop | [config](./configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/projects/actionclip/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb_20230801-8535b794.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/projects/actionclip/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.log) | +| 1x1x8 | 8 | ViT-B/16 | 81.3 | 95.2 | 8 clips x 1 crop | [config](./configs/actionclip_vit-base-p16-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/projects/actionclip/actionclip_vit-base-p16-res224-clip-pre_g8xb16_1x1x8_k400-rgb/actionclip_vit-base-p16-res224-clip-pre_g8xb16_1x1x8_k400-rgb_20230801-b307a0cd.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/projects/actionclip/actionclip_vit-base-p16-res224-clip-pre_g8xb16_1x1x8_k400-rgb/actionclip_vit-base-p16-res224-clip-pre_g8xb16_1x1x8_k400-rgb.log) | + ## Zero-Shot Prediction We offer two methods for zero-shot prediction as follows. The `test.mp4` can be downloaded from [here](https://github-production-user-asset-6210df.s3.amazonaws.com/58767402/237333525-89ebee9a-573e-4e27-9047-0ad6422fa82f.mp4). 
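At inference time, whether zero-shot or with the fine-tuned checkpoints above, the model scores a clip by inserting every class name into each text template, L2-normalising the video and text embeddings, and averaging the per-template softmax scores over all sampled views. The sketch below mirrors the `predict` branch of `models/actionclip.py` further down in this patch; the function and argument names are illustrative only.

```python
import torch
import torch.nn.functional as F


def prompt_ensemble_scores(video_feats: torch.Tensor,
                           text_feats: torch.Tensor,
                           num_views: int,
                           num_prompt: int) -> torch.Tensor:
    """Average class scores over video views and text templates.

    video_feats: (batch * num_views, C), already L2-normalised.
    text_feats: (num_prompt * num_classes, C), template-major order,
        already L2-normalised.
    Returns a (batch, num_classes) tensor of class probabilities.
    """
    bsz = video_feats.size(0) // num_views
    # Cosine similarities scaled by 100, as in CLIP.
    sim = 100.0 * video_feats @ text_feats.T
    # Regroup as (batch, views * templates, classes) and ensemble.
    sim = sim.view(bsz, num_views * num_prompt, -1)
    return F.softmax(sim, dim=2).mean(dim=1)
```

Averaging softmax scores rather than raw similarities keeps each template's contribution on a comparable scale before the ensemble.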
diff --git a/projects/actionclip/configs/actionclip_vit-base-p16-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py b/projects/actionclip/configs/actionclip_vit-base-p16-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py new file mode 100644 index 0000000000..732fd6fac0 --- /dev/null +++ b/projects/actionclip/configs/actionclip_vit-base-p16-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py @@ -0,0 +1,162 @@ +custom_imports = dict(imports='models') + +num_segs = 8 + +model = dict( + type='ActionClip', + clip_arch='ViT-B/16', + num_adapter_segs=num_segs, + num_adapter_layers=6, + to_float32=True, + labels_or_label_file='configs/label_map_k400.txt', + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[122.771, 116.746, 104.093], + std=[68.500, 66.632, 70.323], + format_shape='NCHW')) + +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +file_client_args = dict( + io_backend='petrel', + path_mapping=dict( + {'data/kinetics400/': 's3://openmmlab/datasets/action/Kinetics400/'})) + +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', clip_len=1, frame_interval=1, num_clips=num_segs), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, .875, .75, .66), + random_crop=False, + num_fixed_crops=13, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=num_segs, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +test_pipeline = val_pipeline + +train_dataloader = dict( + batch_size=16, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=16, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=50, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-6, betas=(0.9, 0.98), eps=1e-08, weight_decay=0.2), + paramwise_cfg=dict(custom_keys=dict(adapter=dict(lr_mult=10)))) + 
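+# Note on the optimizer above: via `paramwise_cfg`, every parameter whose
+# name contains 'adapter' (the temporal transformer adapter) is trained with
+# a 10x learning rate, while the pretrained CLIP weights keep the base lr.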
+param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.01, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=45, + eta_min=0, + by_epoch=True, + begin=5, + end=50, + convert_to_iter_based=True) +] + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=128) + +default_scope = 'mmaction' + +default_hooks = dict( + runtime_info=dict(type='RuntimeInfoHook'), + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=100, ignore_last=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', interval=1, save_best='auto', max_keep_ckpts=5), + sampler_seed=dict(type='DistSamplerSeedHook'), + sync_buffers=dict(type='SyncBuffersHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) + +log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict(type='ActionVisualizer', vis_backends=vis_backends) + +log_level = 'INFO' +load_from = None +resume = False diff --git a/projects/actionclip/configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py b/projects/actionclip/configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py new file mode 100644 index 0000000000..0991730c71 --- /dev/null +++ b/projects/actionclip/configs/actionclip_vit-base-p32-res224-clip-pre_g8xb16_1x1x8_k400-rgb.py @@ -0,0 +1,162 @@ +custom_imports = dict(imports='models') + +num_segs = 8 + +model = dict( + type='ActionClip', + clip_arch='ViT-B/32', + num_adapter_segs=num_segs, + num_adapter_layers=6, + to_float32=True, + labels_or_label_file='configs/label_map_k400.txt', + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[122.771, 116.746, 104.093], + std=[68.500, 66.632, 70.323], + format_shape='NCHW')) + +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +file_client_args = dict( + io_backend='petrel', + path_mapping=dict( + {'data/kinetics400/': 's3://openmmlab/datasets/action/Kinetics400/'})) + +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', clip_len=1, frame_interval=1, num_clips=num_segs), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, .875, .75, .66), + random_crop=False, + num_fixed_crops=13, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=num_segs, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + 
dict(type='PackActionInputs') +] + +test_pipeline = val_pipeline + +train_dataloader = dict( + batch_size=16, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=16, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=50, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-6, betas=(0.9, 0.98), eps=1e-08, weight_decay=0.2), + paramwise_cfg=dict(custom_keys=dict(adapter=dict(lr_mult=10)))) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.01, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=45, + eta_min=0, + by_epoch=True, + begin=5, + end=50, + convert_to_iter_based=True) +] + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) + +default_scope = 'mmaction' + +default_hooks = dict( + runtime_info=dict(type='RuntimeInfoHook'), + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=100, ignore_last=False), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict( + type='CheckpointHook', interval=1, save_best='auto', max_keep_ckpts=5), + sampler_seed=dict(type='DistSamplerSeedHook'), + sync_buffers=dict(type='SyncBuffersHook')) + +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) + +log_processor = dict(type='LogProcessor', window_size=20, by_epoch=True) + +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict(type='ActionVisualizer', vis_backends=vis_backends) + +log_level = 'INFO' +load_from = None +resume = False diff --git a/projects/actionclip/models/actionclip.py b/projects/actionclip/models/actionclip.py index 923b78c68f..6b125b40b2 100644 --- a/projects/actionclip/models/actionclip.py +++ b/projects/actionclip/models/actionclip.py @@ -1,9 +1,11 @@ -from typing import Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Tuple, Union import clip import mmengine +import numpy as np import torch import torch.nn.functional as F +from mmengine.dist import all_gather, get_rank from mmengine.model import BaseModel from mmengine.structures import LabelData @@ -11,7 +13,23 @@ from .adapter import TransformerAdapter -def text_prompt(labels_or_label_file, template=None): +class GatherLayer(torch.autograd.Function): + + @staticmethod + def forward(ctx: Any, input: torch.Tensor) -> Tuple[List]: + ctx.save_for_backward(input) + output = all_gather(input) + return tuple(output) + + @staticmethod + def backward(ctx: Any, *grads: torch.Tensor) -> torch.Tensor: + input, = ctx.saved_tensors + grad_out = torch.zeros_like(input) + grad_out[:] = grads[get_rank()] + return grad_out + + +def text_prompt(labels_or_label_file, templates_or_template_file=None): if isinstance(labels_or_label_file, str): labels = mmengine.list_from_file(labels_or_label_file) elif isinstance(labels_or_label_file, list): @@ -20,8 +38,8 @@ def text_prompt(labels_or_label_file, template=None): raise ValueError(f'`labels_or_label_file` must be `list` or `str`, ' f'but got {type(labels_or_label_file)}') - if template is None: - template = [ + if templates_or_template_file is None: + templates = [ 'a photo of action {}', 'a picture of action {}', 'Human action of {}', '{}, an action', '{} this is an action', '{}, a video of action', 'Playing action of {}', '{}', @@ -30,15 +48,15 @@ def text_prompt(labels_or_label_file, template=None): 'Video classification of {}', 'A video of {}', 'The man is {}', 'The woman is {}' ] - elif isinstance(template, str): - template = [template] - elif not mmengine.is_seq_of(template, str): + elif isinstance(templates_or_template_file, str): + templates = mmengine.list_from_file(templates_or_template_file) + elif not mmengine.is_seq_of(templates_or_template_file, str): raise ValueError(f'`template` must be list of `str`, `str` or `None`, ' - f'but got {type(template)}') + f'but got {type(templates_or_template_file)}') - num_prompt = len(template) + num_prompt = len(templates) prompt = torch.cat( - [clip.tokenize(t.format(c)) for t in template for c in labels]) + [clip.tokenize(t.format(c)) for t in templates for c in labels]) return prompt, num_prompt @@ -49,18 +67,25 @@ def __init__(self, clip_arch: str, num_adapter_segs: 
int, num_adapter_layers: int = 6, + to_float32: bool = False, labels_or_label_file: Optional[Union[List[str], str]] = None, - template: Optional[Union[List[str], str]] = None, - data_preprocessor: Optional[Dict] = None): + templates_or_template_file: Optional[Union[List[str], + str]] = None, + data_preprocessor: Optional[Dict] = None, + loss: Dict = dict(type='CrossEntropyLoss', loss_weight=0.5)): super(ActionClip, self).__init__(data_preprocessor=data_preprocessor) - self.clip = clip.load(clip_arch)[0] + self.clip = clip.load(clip_arch, device='cpu')[0] + if to_float32: + self.clip.float() + self.adapter = TransformerAdapter(self.clip, num_adapter_segs, num_adapter_layers) + self.loss = MODELS.build(loss) + if labels_or_label_file is not None: - self.prompt, self.num_prompt = text_prompt(labels_or_label_file, - template) - self.text_features = None + self.prompt, self.num_prompt = text_prompt( + labels_or_label_file, templates_or_template_file) def encode_video(self, video): b, n, c, h, w = video.shape @@ -95,14 +120,13 @@ def forward(self, bsz = len(data_samples) num_views = video_features.shape[0] // bsz - if self.text_features is None: - text_features = self.encode_text(self.prompt.to(inputs.device)) - self.text_features = text_features / text_features.norm( - dim=-1, keepdim=True) + text_features = self.encode_text(self.prompt.to(inputs.device)) + text_features = text_features / text_features.norm( + dim=-1, keepdim=True) # (bsz*num_views, num_prompt, num_classes) -> # (bsz, num_views*num_prompt, num_classes) - similarity = (100.0 * video_features @ self.text_features.T). \ + similarity = (100.0 * video_features @ text_features.T). \ view(bsz, num_views * self.num_prompt, -1) cls_scores = F.softmax(similarity, dim=2).mean(dim=1) @@ -112,6 +136,41 @@ def forward(self, return data_samples + elif mode == 'loss': + video_features = self.encode_video(inputs) + video_features = video_features / video_features.norm( + dim=-1, keepdim=True) + + text_id = np.random.randint( + self.num_prompt, size=len(data_samples)) + real_labels = [x.gt_labels.item.item() for x in data_samples] + selected_prompt = self.prompt.view( + self.num_prompt, -1, + self.prompt.shape[-1])[text_id, real_labels].to(inputs.device) + + text_features = self.encode_text(selected_prompt) + text_features = text_features / text_features.norm( + dim=-1, keepdim=True) + + video_features = torch.cat( + GatherLayer.apply(video_features), dim=0) + text_features = torch.cat(GatherLayer.apply(text_features), dim=0) + + logit_scale = self.clip.logit_scale.exp() + logits_per_video = logit_scale * video_features @ text_features.t() + logits_per_text = logits_per_video.t() + labels = torch.arange(logits_per_video.shape[0]).to( + logit_scale.device) + + sim_loss_v2t = self.loss(logits_per_video, labels) + sim_loss_t2v = self.loss(logits_per_text, labels) + + losses = dict() + losses['sim_loss_v2t'] = sim_loss_v2t + losses['sim_loss_t2v'] = sim_loss_t2v + return losses + else: - raise RuntimeError(f'Invalid mode "{mode}". ' - 'Only supports `predict` and `tensor` mode. ') + raise RuntimeError( + f'Invalid mode "{mode}". ' + 'Only supports `predict`, `loss` and `tensor` mode. 
') From a7f77b753aee12a2916be445b6eb69136ca18914 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Wed, 11 Oct 2023 23:54:00 -0400 Subject: [PATCH 21/24] [Feature] support DRN (#2668) --- configs/localization/drn/README.md | 84 ++++++ .../drn_2xb16-4096-10e_c3d-feature_first.py | 115 ++++++++ .../drn_2xb16-4096-10e_c3d-feature_second.py | 110 ++++++++ .../drn_2xb16-4096-10e_c3d-feature_third.py | 110 ++++++++ configs/localization/drn/metafile.yml | 26 ++ mmaction/datasets/__init__.py | 3 +- mmaction/datasets/charades_sta_dataset.py | 124 +++++++++ mmaction/evaluation/metrics/__init__.py | 3 +- .../metrics/video_grounding_metric.py | 66 +++++ mmaction/models/localizers/__init__.py | 3 +- mmaction/models/localizers/drn/drn.py | 260 ++++++++++++++++++ .../models/localizers/drn/drn_utils/FPN.py | 44 +++ .../localizers/drn/drn_utils/__init__.py | 7 + .../localizers/drn/drn_utils/backbone.py | 48 ++++ .../models/localizers/drn/drn_utils/fcos.py | 192 +++++++++++++ .../localizers/drn/drn_utils/inference.py | 212 ++++++++++++++ .../drn/drn_utils/language_module.py | 92 +++++++ .../models/localizers/drn/drn_utils/loss.py | 240 ++++++++++++++++ tools/data/charades-sta/README.md | 59 ++++ .../data/charades-sta/download_annotations.sh | 18 ++ 20 files changed, 1813 insertions(+), 3 deletions(-) create mode 100644 configs/localization/drn/README.md create mode 100644 configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_first.py create mode 100644 configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_second.py create mode 100644 configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_third.py create mode 100644 configs/localization/drn/metafile.yml create mode 100644 mmaction/datasets/charades_sta_dataset.py create mode 100644 mmaction/evaluation/metrics/video_grounding_metric.py create mode 100644 mmaction/models/localizers/drn/drn.py create mode 100644 mmaction/models/localizers/drn/drn_utils/FPN.py create mode 100644 mmaction/models/localizers/drn/drn_utils/__init__.py create mode 100644 mmaction/models/localizers/drn/drn_utils/backbone.py create mode 100644 mmaction/models/localizers/drn/drn_utils/fcos.py create mode 100644 mmaction/models/localizers/drn/drn_utils/inference.py create mode 100644 mmaction/models/localizers/drn/drn_utils/language_module.py create mode 100644 mmaction/models/localizers/drn/drn_utils/loss.py create mode 100644 tools/data/charades-sta/README.md create mode 100644 tools/data/charades-sta/download_annotations.sh diff --git a/configs/localization/drn/README.md b/configs/localization/drn/README.md new file mode 100644 index 0000000000..7eb5b3edda --- /dev/null +++ b/configs/localization/drn/README.md @@ -0,0 +1,84 @@ +# DRN + +[Dense Regression Network for Video Grounding](https://openaccess.thecvf.com/content_CVPR_2020/papers/Zeng_Dense_Regression_Network_for_Video_Grounding_CVPR_2020_paper.pdf) + + + +## Abstract + + + +We address the problem of video grounding from natural language queries. The key challenge in this task is that one training video might only contain a few annotated starting/ending frames that can be used as positive examples for model training. Most conventional approaches directly train a binary classifier using such imbalance data, thus achieving inferior results. The key idea of this paper is to use the distances between the frame within the ground truth and the starting (ending) frame as dense supervisions to improve the video grounding accuracy. 
Specifically, we design a novel dense regression network (DRN) to regress the distances from each frame to the starting (ending) frame of the video segment described by the query. We also propose a simple but effective IoU regression head module to explicitly consider the localization quality of the grounding results (i.e., the IoU between the predicted location and the ground truth). Experimental results show that our approach significantly outperforms state-of-the-arts on three datasets (i.e., Charades-STA, ActivityNet-Captions, and TACoS). + + + +
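The key idea above (dense distance supervision plus an IoU head) can be illustrated in a few lines. The sketch below is a minimal NumPy illustration only, with names chosen here for clarity; it assumes times are given in seconds and that every frame inside the ground-truth segment is a positive point. It is not the code added in this patch.

```python
# Illustrative sketch of DRN-style dense supervision (assumption: times in
# seconds, every frame inside the GT segment is a positive training point).
import numpy as np


def dense_regression_targets(frame_times: np.ndarray, gt_start: float,
                             gt_end: float) -> np.ndarray:
    """Return (dist_to_start, dist_to_end) for frames inside the GT segment."""
    inside = (frame_times >= gt_start) & (frame_times <= gt_end)
    left = frame_times[inside] - gt_start   # distance to the starting frame
    right = gt_end - frame_times[inside]    # distance to the ending frame
    return np.stack([left, right], axis=1)


def temporal_iou(pred: tuple, gt: tuple) -> float:
    """IoU between two 1-D segments, the target of the IoU regression head."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = max(pred[1], gt[1]) - min(pred[0], gt[0])
    return inter / union if union > 0 else 0.0


frames = np.arange(0.0, 10.0, 1.0)          # 10 frames sampled at 1 fps
targets = dense_regression_targets(frames, gt_start=2.0, gt_end=7.0)
print(targets)                              # one (left, right) pair per inside frame
print(temporal_iou((1.5, 6.0), (2.0, 7.0)))  # localization quality of a prediction
```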
+ +
+
+## Results and Models
+
+### Charades STA C3D feature
+
+| feature | gpus | pretrain | Recall@Top1(IoU=0.5) | Recall@Top5(IoU=0.5) | config | ckpt | log |
+| :-----: | :--: | :------: | :------------------: | :------------------: | :----------------------------------------------: | :---------------------------------------------: | :--------------------------------------------: |
+| C3D | 2 | None | 47.04 | 84.57 | [config](/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_third.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/localization/drn/drn_2xb16-4096-10e_c3d-feature_20230809-ec0429a6.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/drn_2xb16-4096-10e_c3d-feature.log) |
+
+For more details on data preparation, you can refer to [Charades STA Data Preparation](/tools/data/charades-sta/README.md).
+
+## Train
+
+The training of DRN has three stages. Following the official paper, the second and third stages load the best checkpoint from the previous stage; a small helper for locating that checkpoint is sketched after the **Test** section below.
+
+The first stage training:
+
+```shell
+bash tools/dist_train.sh configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_first.py 2
+```
+
+The second stage training:
+
+```shell
+BEST_CKPT=work_dirs/drn_2xb16-4096-10e_c3d-feature_first/SOME.PTH
+bash tools/dist_train.sh configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_second.py 2 --cfg-options load_from=${BEST_CKPT}
+```
+
+The third stage training:
+
+```shell
+BEST_CKPT=work_dirs/drn_2xb16-4096-10e_c3d-feature_second/SOME.PTH
+bash tools/dist_train.sh configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_third.py 2 --cfg-options load_from=${BEST_CKPT}
+```
+
+## Test
+
+Test DRN on Charades STA C3D feature:
+
+```shell
+python3 tools/test.py configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_third.py CHECKPOINT.PTH
+```
+
+For more details, you can refer to the **Testing** part in the [Training and Test Tutorial](/docs/en/user_guides/train_test.md).
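Since each later stage loads the best checkpoint of the previous one, the snippet below sketches one way to locate that file programmatically. It assumes the inherited runtime config enables MMEngine's `CheckpointHook` with `save_best='auto'`, which writes a checkpoint whose filename starts with `best_` into the work directory; the helper name `find_best_ckpt`, the default `work_dirs/<config-stem>` path, and the exact filename pattern are assumptions for illustration, not part of this patch.

```python
# Minimal helper sketch for locating the best checkpoint of a finished stage.
# Assumption: CheckpointHook(save_best='auto') saves it as 'best_*.pth' inside
# the work dir (the metric name embedded in the filename may vary).
from pathlib import Path


def find_best_ckpt(work_dir: str) -> str:
    candidates = list(Path(work_dir).glob('best_*.pth'))
    if not candidates:
        raise FileNotFoundError(f'no best_*.pth checkpoint found in {work_dir}')
    # If several exist, take the most recently written one.
    return str(max(candidates, key=lambda p: p.stat().st_mtime))


if __name__ == '__main__':
    best = find_best_ckpt('work_dirs/drn_2xb16-4096-10e_c3d-feature_first')
    # Print the second-stage command with the resolved checkpoint path.
    print('bash tools/dist_train.sh '
          'configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_second.py 2 '
          f'--cfg-options load_from={best}')
```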
+ +## Citation + +```BibTeX +@inproceedings{DRN2020CVPR, + author = {Runhao, Zeng and Haoming, Xu and Wenbing, Huang and Peihao, Chen and Mingkui, Tan and Chuang Gan}, + title = {Dense Regression Network for Video Grounding}, + booktitle = {CVPR}, + year = {2020}, +} +``` + + + +```BibTeX +@inproceedings{gao2017tall, + title={Tall: Temporal activity localization via language query}, + author={Gao, Jiyang and Sun, Chen and Yang, Zhenheng and Nevatia, Ram}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={5267--5275}, + year={2017} +} +``` diff --git a/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_first.py b/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_first.py new file mode 100644 index 0000000000..e66076e962 --- /dev/null +++ b/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_first.py @@ -0,0 +1,115 @@ +_base_ = ['../../_base_/default_runtime.py'] + +# model settings +model = dict( + type='DRN', + vocab_size=1301, + feature_dim=4096, + embed_dim=300, + hidden_dim=512, + bidirection=True, + first_output_dim=256, + fpn_feature_dim=512, + lstm_layers=1, + graph_node_features=1024, + fcos_pre_nms_top_n=32, + fcos_inference_thr=0.05, + fcos_prior_prob=0.01, + focal_alpha=0.25, + focal_gamma=2.0, + fpn_stride=[1, 2, 4], + fcos_nms_thr=0.6, + fcos_conv_layers=1, + fcos_num_class=2, + is_first_stage=True, + is_second_stage=False) + +# dataset settings +dataset_type = 'CharadesSTADataset' +root = 'data/CharadesSTA' +data_root = f'{root}/C3D_unit16_overlap0.5_merged/' +data_root_val = f'{root}/C3D_unit16_overlap0.5_merged/' +ann_file_train = f'{root}/Charades_sta_train.txt' +ann_file_val = f'{root}/Charades_sta_test.txt' +ann_file_test = f'{root}/Charades_sta_test.txt' + +word2id_file = f'{root}/Charades_word2id.json' +fps_file = f'{root}/Charades_fps_dict.json' +duration_file = f'{root}/Charades_duration.json' +num_frames_file = f'{root}/Charades_frames_info.json' +window_size = 16 +ft_overlap = 0.5 + +train_pipeline = [ + dict( + type='PackLocalizationInputs', + keys=('gt_bbox', 'proposals'), + meta_keys=('vid_name', 'query_tokens', 'query_length', 'num_proposals', + 'num_frames')) +] + +val_pipeline = train_pipeline +test_pipeline = val_pipeline + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + drop_last=True, + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline, + word2id_file=word2id_file, + fps_file=fps_file, + duration_file=duration_file, + num_frames_file=num_frames_file, + window_size=window_size, + ft_overlap=ft_overlap), +) + +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + drop_last=True, + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root), + pipeline=val_pipeline, + word2id_file=word2id_file, + fps_file=fps_file, + duration_file=duration_file, + num_frames_file=num_frames_file, + window_size=window_size, + ft_overlap=ft_overlap), +) +test_dataloader = val_dataloader + +max_epochs = 10 +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_begin=1, + val_interval=1) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +val_evaluator = dict(type='RecallatTopK', topK_list=(1, 5), threshold=0.5) +test_evaluator = val_evaluator + +optim_wrapper = dict( + optimizer=dict(type='Adam', 
lr=1e-3), + clip_grad=dict(max_norm=5, norm_type=2), +) + +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=True, begin=0, end=5), +] + +find_unused_parameters = True diff --git a/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_second.py b/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_second.py new file mode 100644 index 0000000000..46a671db4c --- /dev/null +++ b/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_second.py @@ -0,0 +1,110 @@ +_base_ = ['../../_base_/default_runtime.py'] + +# model settings +model = dict( + type='DRN', + vocab_size=1301, + feature_dim=4096, + embed_dim=300, + hidden_dim=512, + bidirection=True, + first_output_dim=256, + fpn_feature_dim=512, + lstm_layers=1, + graph_node_features=1024, + fcos_pre_nms_top_n=32, + fcos_inference_thr=0.05, + fcos_prior_prob=0.01, + focal_alpha=0.25, + focal_gamma=2.0, + fpn_stride=[1, 2, 4], + fcos_nms_thr=0.6, + fcos_conv_layers=1, + fcos_num_class=2, + is_first_stage=False, + is_second_stage=True) + +# dataset settings +dataset_type = 'CharadesSTADataset' +root = 'data/CharadesSTA' +data_root = f'{root}/C3D_unit16_overlap0.5_merged/' +data_root_val = f'{root}/C3D_unit16_overlap0.5_merged/' +ann_file_train = f'{root}/Charades_sta_train.txt' +ann_file_val = f'{root}/Charades_sta_test.txt' +ann_file_test = f'{root}/Charades_sta_test.txt' + +word2id_file = f'{root}/Charades_word2id.json' +fps_file = f'{root}/Charades_fps_dict.json' +duration_file = f'{root}/Charades_duration.json' +num_frames_file = f'{root}/Charades_frames_info.json' +window_size = 16 +ft_overlap = 0.5 + +train_pipeline = [ + dict( + type='PackLocalizationInputs', + keys=('gt_bbox', 'proposals'), + meta_keys=('vid_name', 'query_tokens', 'query_length', 'num_proposals', + 'num_frames')) +] + +val_pipeline = train_pipeline +test_pipeline = val_pipeline + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + drop_last=True, + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline, + word2id_file=word2id_file, + fps_file=fps_file, + duration_file=duration_file, + num_frames_file=num_frames_file, + window_size=window_size, + ft_overlap=ft_overlap), +) + +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + drop_last=True, + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root), + pipeline=val_pipeline, + word2id_file=word2id_file, + fps_file=fps_file, + duration_file=duration_file, + num_frames_file=num_frames_file, + window_size=window_size, + ft_overlap=ft_overlap), +) +test_dataloader = val_dataloader + +max_epochs = 10 +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_begin=1, + val_interval=1) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +val_evaluator = dict(type='RecallatTopK', topK_list=(1, 5), threshold=0.5) +test_evaluator = val_evaluator + +optim_wrapper = dict( + optimizer=dict(type='Adam', lr=1e-5), + clip_grad=dict(max_norm=5, norm_type=2)) + +find_unused_parameters = True diff --git a/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_third.py b/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_third.py new file mode 100644 index 0000000000..2a286415bc --- /dev/null +++ b/configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_third.py @@ -0,0 +1,110 @@ +_base_ = 
['../../_base_/default_runtime.py'] + +# model settings +model = dict( + type='DRN', + vocab_size=1301, + feature_dim=4096, + embed_dim=300, + hidden_dim=512, + bidirection=True, + first_output_dim=256, + fpn_feature_dim=512, + lstm_layers=1, + graph_node_features=1024, + fcos_pre_nms_top_n=32, + fcos_inference_thr=0.05, + fcos_prior_prob=0.01, + focal_alpha=0.25, + focal_gamma=2.0, + fpn_stride=[1, 2, 4], + fcos_nms_thr=0.6, + fcos_conv_layers=1, + fcos_num_class=2, + is_first_stage=False, + is_second_stage=False) + +# dataset settings +dataset_type = 'CharadesSTADataset' +root = 'data/CharadesSTA' +data_root = f'{root}/C3D_unit16_overlap0.5_merged/' +data_root_val = f'{root}/C3D_unit16_overlap0.5_merged/' +ann_file_train = f'{root}/Charades_sta_train.txt' +ann_file_val = f'{root}/Charades_sta_test.txt' +ann_file_test = f'{root}/Charades_sta_test.txt' + +word2id_file = f'{root}/Charades_word2id.json' +fps_file = f'{root}/Charades_fps_dict.json' +duration_file = f'{root}/Charades_duration.json' +num_frames_file = f'{root}/Charades_frames_info.json' +window_size = 16 +ft_overlap = 0.5 + +train_pipeline = [ + dict( + type='PackLocalizationInputs', + keys=('gt_bbox', 'proposals'), + meta_keys=('vid_name', 'query_tokens', 'query_length', 'num_proposals', + 'num_frames')) +] + +val_pipeline = train_pipeline +test_pipeline = val_pipeline + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + drop_last=True, + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline, + word2id_file=word2id_file, + fps_file=fps_file, + duration_file=duration_file, + num_frames_file=num_frames_file, + window_size=window_size, + ft_overlap=ft_overlap), +) + +val_dataloader = dict( + batch_size=1, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + drop_last=True, + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root), + pipeline=val_pipeline, + word2id_file=word2id_file, + fps_file=fps_file, + duration_file=duration_file, + num_frames_file=num_frames_file, + window_size=window_size, + ft_overlap=ft_overlap), +) +test_dataloader = val_dataloader + +max_epochs = 10 +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_begin=1, + val_interval=1) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +val_evaluator = dict(type='RecallatTopK', topK_list=(1, 5), threshold=0.5) +test_evaluator = val_evaluator + +optim_wrapper = dict( + optimizer=dict(type='Adam', lr=1e-6), + clip_grad=dict(max_norm=5, norm_type=2)) + +find_unused_parameters = True diff --git a/configs/localization/drn/metafile.yml b/configs/localization/drn/metafile.yml new file mode 100644 index 0000000000..d092668b1e --- /dev/null +++ b/configs/localization/drn/metafile.yml @@ -0,0 +1,26 @@ +Collections: +- Name: DRN + README: configs/localization/drn/README.md + Paper: + URL: https://openaccess.thecvf.com/content_CVPR_2020/papers/Zeng_Dense_Regression_Network_for_Video_Grounding_CVPR_2020_paper.pdf + Title: "Dense Regression Network for Video Grounding" + +Models: + - Name: drn_2xb16-4096-10e_c3d-feature_third + Config: configs/localization/drn/drn_2xb16-4096-10e_c3d-feature_third.py + In Collection: DRN + Metadata: + Batch Size: 16 + Epochs: 10 + Training Data: Charades STA + Training Resources: 2 GPUs + feature: C3D + Modality: RGB + Results: + - Dataset: Charades STA 
+ Task: Video Grounding + Metrics: + Recall@Top1(IoU=0.5): 47.04 + Recall@Top5(IoU=0.5): 84.57 + Training Log: https://download.openmmlab.com/mmaction/v1.0/drn_2xb16-4096-10e_c3d-feature.log + Weights: https://download.openmmlab.com/mmaction/v1.0/localization/drn/drn_2xb16-4096-10e_c3d-feature_20230809-ec0429a6.pth diff --git a/mmaction/datasets/__init__.py b/mmaction/datasets/__init__.py index cc838f8f31..eef565309d 100644 --- a/mmaction/datasets/__init__.py +++ b/mmaction/datasets/__init__.py @@ -3,6 +3,7 @@ from .audio_dataset import AudioDataset from .ava_dataset import AVADataset, AVAKineticsDataset from .base import BaseActionDataset +from .charades_sta_dataset import CharadesSTADataset from .msrvtt_datasets import MSRVTTVQA, MSRVTTVQAMC, MSRVTTRetrieval from .pose_dataset import PoseDataset from .rawframe_dataset import RawframeDataset @@ -15,5 +16,5 @@ 'AVADataset', 'AVAKineticsDataset', 'ActivityNetDataset', 'AudioDataset', 'BaseActionDataset', 'PoseDataset', 'RawframeDataset', 'RepeatAugDataset', 'VideoDataset', 'repeat_pseudo_collate', 'VideoTextDataset', - 'MSRVTTRetrieval', 'MSRVTTVQA', 'MSRVTTVQAMC' + 'MSRVTTRetrieval', 'MSRVTTVQA', 'MSRVTTVQAMC', 'CharadesSTADataset' ] diff --git a/mmaction/datasets/charades_sta_dataset.py b/mmaction/datasets/charades_sta_dataset.py new file mode 100644 index 0000000000..aca9c9a6bb --- /dev/null +++ b/mmaction/datasets/charades_sta_dataset.py @@ -0,0 +1,124 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +from typing import Callable, List, Optional, Union + +import mmengine +import numpy as np +import torch +from mmengine.fileio import exists + +from mmaction.registry import DATASETS +from mmaction.utils import ConfigType +from .base import BaseActionDataset + +try: + import nltk + nltk_imported = True +except ImportError: + nltk_imported = False + + +@DATASETS.register_module() +class CharadesSTADataset(BaseActionDataset): + + def __init__(self, + ann_file: str, + pipeline: List[Union[dict, Callable]], + word2id_file: str, + fps_file: str, + duration_file: str, + num_frames_file: str, + window_size: int, + ft_overlap: float, + data_prefix: Optional[ConfigType] = dict(video=''), + test_mode: bool = False, + **kwargs): + if not nltk_imported: + raise ImportError('nltk is required for CharadesSTADataset') + + self.fps_info = mmengine.load(fps_file) + self.duration_info = mmengine.load(duration_file) + self.num_frames = mmengine.load(num_frames_file) + self.word2id = mmengine.load(word2id_file) + self.ft_interval = int(window_size * (1 - ft_overlap)) + + super().__init__( + ann_file, + pipeline=pipeline, + data_prefix=data_prefix, + test_mode=test_mode, + **kwargs) + + def load_data_list(self) -> List[dict]: + """Load annotation file to get video information.""" + exists(self.ann_file) + data_list = [] + with open(self.ann_file) as f: + anno_database = f.readlines() + + for item in anno_database: + first_part, query_sentence = item.strip().split('##') + query_sentence = query_sentence.replace('.', '') + query_words = nltk.word_tokenize(query_sentence) + query_tokens = [self.word2id[word] for word in query_words] + query_length = len(query_tokens) + query_tokens = torch.from_numpy(np.array(query_tokens)) + + vid_name, start_time, end_time = first_part.split() + duration = float(self.duration_info[vid_name]) + fps = float(self.fps_info[vid_name]) + + gt_start_time = float(start_time) + gt_end_time = float(end_time) + + gt_bbox = (gt_start_time / duration, min(gt_end_time / duration, + 1)) + + num_frames = 
int(self.num_frames[vid_name]) + proposal_frames = self.get_proposals(num_frames) + + proposals = proposal_frames / num_frames + proposals = torch.from_numpy(proposals) + proposal_indexes = proposal_frames / self.ft_interval + proposal_indexes = proposal_indexes.astype(np.int32) + + info = dict( + vid_name=vid_name, + fps=fps, + num_frames=num_frames, + duration=duration, + query_tokens=query_tokens, + query_length=query_length, + gt_start_time=gt_start_time, + gt_end_time=gt_end_time, + gt_bbox=gt_bbox, + proposals=proposals, + num_proposals=proposals.shape[0], + proposal_indexes=proposal_indexes) + data_list.append(info) + return data_list + + def get_proposals(self, num_frames): + proposals = (num_frames - 1) / 32 * np.arange(33) + proposals = proposals.astype(np.int32) + proposals = np.stack([proposals[:-1], proposals[1:]]).T + return proposals + + def get_data_info(self, idx: int) -> dict: + """Get annotation by index.""" + data_info = super().get_data_info(idx) + vid_name = data_info['vid_name'] + feature_path = os.path.join(self.data_prefix['video'], + f'{vid_name}.pt') + vid_feature = torch.load(feature_path) + proposal_feats = [] + proposal_indexes = data_info['proposal_indexes'].clip( + max=vid_feature.shape[0] - 1) + for s, e in proposal_indexes: + prop_feature, _ = vid_feature[s:e + 1].max(dim=0) + proposal_feats.append(prop_feature) + + proposal_feats = torch.stack(proposal_feats) + + data_info['raw_feature'] = proposal_feats + return data_info diff --git a/mmaction/evaluation/metrics/__init__.py b/mmaction/evaluation/metrics/__init__.py index 341ec577ce..fd50aded2e 100644 --- a/mmaction/evaluation/metrics/__init__.py +++ b/mmaction/evaluation/metrics/__init__.py @@ -5,9 +5,10 @@ from .multimodal_metric import VQAMCACC, ReportVQA, RetrievalRecall, VQAAcc from .multisports_metric import MultiSportsMetric from .retrieval_metric import RetrievalMetric +from .video_grounding_metric import RecallatTopK __all__ = [ 'AccMetric', 'AVAMetric', 'ANetMetric', 'ConfusionMatrix', 'MultiSportsMetric', 'RetrievalMetric', 'VQAAcc', 'ReportVQA', 'VQAMCACC', - 'RetrievalRecall' + 'RetrievalRecall', 'RecallatTopK' ] diff --git a/mmaction/evaluation/metrics/video_grounding_metric.py b/mmaction/evaluation/metrics/video_grounding_metric.py new file mode 100644 index 0000000000..310db64452 --- /dev/null +++ b/mmaction/evaluation/metrics/video_grounding_metric.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Any, Optional, Sequence, Tuple + +from mmengine.evaluator import BaseMetric + +from mmaction.registry import METRICS + + +@METRICS.register_module() +class RecallatTopK(BaseMetric): + """ActivityNet dataset evaluation metric.""" + + def __init__(self, + topK_list: Tuple[int] = (1, 5), + threshold: float = 0.5, + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super().__init__(collect_device=collect_device, prefix=prefix) + self.topK_list = topK_list + self.threshold = threshold + + def process(self, data_batch: Sequence[Tuple[Any, dict]], + predictions: Sequence[dict]) -> None: + """Process one batch of data samples and predictions. The processed + results should be stored in ``self.results``, which will be used to + compute the metrics when all batches have been processed. + + Args: + data_batch (Sequence[Tuple[Any, dict]]): A batch of data + from the dataloader. + predictions (Sequence[dict]): A batch of outputs from + the model. 
+ """ + for pred in predictions: + self.results.append(pred) + + def compute_metrics(self, results: list) -> dict: + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + Returns: + dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + eval_results = dict() + for topK in self.topK_list: + total = len(results) + correct = 0.0 + for result in results: + gt = result['gt'] + predictions = result['predictions'][:topK] + for prediction in predictions: + IoU = self.calculate_IoU(gt, prediction) + if IoU > self.threshold: + correct += 1 + break + acc = correct / total + eval_results[f'Recall@Top{topK}_IoU={self.threshold}'] = acc + return eval_results + + def calculate_IoU(self, i0, i1): + union = (min(i0[0], i1[0]), max(i0[1], i1[1])) + inter = (max(i0[0], i1[0]), min(i0[1], i1[1])) + iou = (inter[1] - inter[0]) / (union[1] - union[0]) + return iou diff --git a/mmaction/models/localizers/__init__.py b/mmaction/models/localizers/__init__.py index 26e016410b..debd9a16f4 100644 --- a/mmaction/models/localizers/__init__.py +++ b/mmaction/models/localizers/__init__.py @@ -1,6 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. from .bmn import BMN from .bsn import PEM, TEM +from .drn.drn import DRN from .tcanet import TCANet -__all__ = ['TEM', 'PEM', 'BMN', 'TCANet'] +__all__ = ['TEM', 'PEM', 'BMN', 'TCANet', 'DRN'] diff --git a/mmaction/models/localizers/drn/drn.py b/mmaction/models/localizers/drn/drn.py new file mode 100644 index 0000000000..869791e6bb --- /dev/null +++ b/mmaction/models/localizers/drn/drn.py @@ -0,0 +1,260 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +from mmengine.model import BaseModel + +from mmaction.registry import MODELS +from mmaction.utils import OptConfigType +from ..utils import soft_nms +from .drn_utils import FPN, Backbone, FCOSModule, QueryEncoder + + +@MODELS.register_module() +class DRN(BaseModel): + """Dense Regression Network for Video Grounding. + + Please refer `Dense Regression Network for Video Grounding + `_. + Code Reference: https://github.com/Alvin-Zeng/DRN + + Args: + vocab_size (int): number of all possible words in the query. + Defaults to 1301. + hidden_dim (int): the hidden dimension of the LSTM in the + language model. Defaults to 512. + embed_dim (int): the embedding dimension of the query. Defaults + to 300. + bidirection (bool): if True, use bi-direction LSTM in the + language model. Defaults to True. + first_output_dim (int): the output dimension of the first layer + in the backbone. Defaults to 256. + fpn_feature_dim (int): the output dimension of the FPN. Defaults + to 512. + feature_dim (int): the dimension of the video clip feature. + lstm_layers (int): the number of LSTM layers in the language model. + Defaults to 1. + fcos_pre_nms_top_n (int): value of Top-N in the FCOS module before + nms. Defaults to 32. + fcos_inference_thr (float): threshold in the FOCS inference. BBoxes + with scores higher than this threshold are regarded as positive. + Defaults to 0.05. + fcos_prior_prob (float): A prior probability of the positive bboexes. + Used to initialized the bias of the classification head. + Defaults to 0.01. + focal_alpha (float):Focal loss hyper-parameter alpha. + Defaults to 0.25. + focal_gamma (float): Focal loss hyper-parameter gamma. + Defaults to 2.0. + fpn_stride (Sequence[int]): the strides in the FPN. 
Defaults to + [1, 2, 4]. + fcos_nms_thr (float): NMS threshold in the FOCS module. + Defaults to 0.6. + fcos_conv_layers (int): number of convolution layers in FCOS. + Defaults to 1. + fcos_num_class (int): number of classes in FCOS. + Defaults to 2. + is_first_stage (bool): if true, the model is in the first stage + training. + is_second_stage (bool): if true, the model is in the second stage + training. + """ + + def __init__(self, + vocab_size: int = 1301, + hidden_dim: int = 512, + embed_dim: int = 300, + bidirection: bool = True, + first_output_dim: int = 256, + fpn_feature_dim: int = 512, + feature_dim: int = 4096, + lstm_layers: int = 1, + fcos_pre_nms_top_n: int = 32, + fcos_inference_thr: float = 0.05, + fcos_prior_prob: float = 0.01, + focal_alpha: float = 0.25, + focal_gamma: float = 2.0, + fpn_stride: Sequence[int] = [1, 2, 4], + fcos_nms_thr: float = 0.6, + fcos_conv_layers: int = 1, + fcos_num_class: int = 2, + is_first_stage: bool = False, + is_second_stage: bool = False, + init_cfg: OptConfigType = None, + **kwargs) -> None: + super(DRN, self).__init__(init_cfg) + + self.query_encoder = QueryEncoder( + vocab_size=vocab_size, + hidden_dim=hidden_dim, + embed_dim=embed_dim, + num_layers=lstm_layers, + bidirection=bidirection) + + channels_list = [ + (feature_dim + 256, first_output_dim, 3, 1), + (first_output_dim, first_output_dim * 2, 3, 2), + (first_output_dim * 2, first_output_dim * 4, 3, 2), + ] + self.backbone_net = Backbone(channels_list) + + self.fpn = FPN( + in_channels_list=[256, 512, 1024], out_channels=fpn_feature_dim) + + self.fcos = FCOSModule( + in_channels=fpn_feature_dim, + fcos_num_class=fcos_num_class, + fcos_conv_layers=fcos_conv_layers, + fcos_prior_prob=fcos_prior_prob, + fcos_inference_thr=fcos_inference_thr, + fcos_pre_nms_top_n=fcos_pre_nms_top_n, + fcos_nms_thr=fcos_nms_thr, + test_detections_per_img=32, + fpn_stride=fpn_stride, + focal_alpha=focal_alpha, + focal_gamma=focal_gamma, + is_first_stage=is_first_stage, + is_second_stage=is_second_stage) + + self.prop_fc = nn.Linear(feature_dim, feature_dim) + self.position_transform = nn.Linear(3, 256) + + qInput = [] + for t in range(len(channels_list)): + if t > 0: + qInput += [nn.Linear(1024, channels_list[t - 1][1])] + else: + qInput += [nn.Linear(1024, feature_dim)] + self.qInput = nn.ModuleList(qInput) + + self.is_second_stage = is_second_stage + + def forward(self, inputs, data_samples, mode, **kwargs): + props_features = torch.stack(inputs) + batch_size = props_features.shape[0] + device = props_features.device + proposals = torch.stack([ + sample.proposals['proposals'] for sample in data_samples + ]).to(device) + gt_bbox = torch.stack([ + sample.gt_instances['gt_bbox'] for sample in data_samples + ]).to(device) + + video_info = [i.metainfo for i in data_samples] + query_tokens_ = [i['query_tokens'] for i in video_info] + query_length = [i['query_length'] for i in video_info] + query_length = torch.from_numpy(np.array(query_length)) + + max_query_len = max([i.shape[0] for i in query_tokens_]) + query_tokens = torch.zeros(batch_size, max_query_len) + for idx, query_token in enumerate(query_tokens_): + query_len = query_token.shape[0] + query_tokens[idx, :query_len] = query_token + + query_tokens = query_tokens.to(device).long() + query_length = query_length.to(device).long() # should be on CPU! 
+ + sort_index = query_length.argsort(descending=True) + box_lists, loss_dict = self._forward(query_tokens[sort_index], + query_length[sort_index], + props_features[sort_index], + proposals[sort_index], + gt_bbox[sort_index]) + if mode == 'loss': + return loss_dict + elif mode == 'predict': + # only support batch size = 1 + bbox = box_lists[0] + + per_vid_detections = bbox['detections'] + per_vid_scores = bbox['scores'] + + props_pred = torch.cat( + (per_vid_detections, per_vid_scores.unsqueeze(-1)), dim=-1) + + props_pred = props_pred.cpu().numpy() + props_pred = sorted(props_pred, key=lambda x: x[-1], reverse=True) + props_pred = np.array(props_pred) + + props_pred = soft_nms( + props_pred, + alpha=0.4, + low_threshold=0.5, + high_threshold=0.9, + top_k=5) + result = { + 'vid_name': data_samples[0].metainfo['vid_name'], + 'gt': gt_bbox[0].cpu().numpy(), + 'predictions': props_pred, + } + return [result] + + raise ValueError(f'Unsupported mode {mode}!') + + def nms_temporal(self, start, end, score, overlap=0.45): + pick = [] + assert len(start) == len(score) + assert len(end) == len(score) + if len(start) == 0: + return pick + + union = end - start + # sort and get index + intervals = [ + i[0] for i in sorted(enumerate(score), key=lambda x: x[1]) + ] + + while len(intervals) > 0: + i = intervals[-1] + pick.append(i) + + xx1 = [max(start[i], start[j]) for j in intervals[:-1]] + xx2 = [min(end[i], end[j]) for j in intervals[:-1]] + inter = [max(0., k2 - k1) for k1, k2 in zip(xx1, xx2)] + o = [ + inter[u] / (union[i] + union[intervals[u]] - inter[u]) + for u in range(len(intervals) - 1) + ] + I_new = [] + for j in range(len(o)): + if o[j] <= overlap: + I_new.append(intervals[j]) + intervals = I_new + return np.array(pick) + + def _forward(self, query_tokens, query_length, props_features, + props_start_end, gt_bbox): + + position_info = [props_start_end, props_start_end] + position_feats = [] + query_features = self.query_encoder(query_tokens, query_length) + for i in range(len(query_features)): + query_features[i] = self.qInput[i](query_features[i]) + if i > 1: + position_info.append( + torch.cat([ + props_start_end[:, ::2 * (i - 1), [0]], + props_start_end[:, 1::2 * (i - 1), [1]] + ], + dim=-1)) + props_duration = position_info[i][:, :, 1] - position_info[i][:, :, + 0] + props_duration = props_duration.unsqueeze(-1) + position_feat = torch.cat((position_info[i], props_duration), + dim=-1).float() + position_feats.append( + self.position_transform(position_feat).permute(0, 2, 1)) + + props_features = self.prop_fc(props_features) + + inputs = props_features.permute(0, 2, 1) + outputs = self.backbone_net(inputs, query_features, position_feats) + outputs = self.fpn(outputs) + + if self.is_second_stage: + outputs = [_.detach() for _ in outputs] + box_lists, loss_dict = self.fcos(outputs, gt_bbox.float()) + + return box_lists, loss_dict diff --git a/mmaction/models/localizers/drn/drn_utils/FPN.py b/mmaction/models/localizers/drn/drn_utils/FPN.py new file mode 100644 index 0000000000..1170ac5cf3 --- /dev/null +++ b/mmaction/models/localizers/drn/drn_utils/FPN.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List, Tuple + +import torch.nn.functional as F +from torch import Tensor, nn + +from .backbone import conv_block + + +class FPN(nn.Module): + + def __init__(self, in_channels_list: List, out_channels: int) -> None: + super(FPN, self).__init__() + + inner_blocks = [] + layer_blocks = [] + for idx, in_channels in enumerate(in_channels_list, 1): + inner_block = conv_block(in_channels, out_channels, 1, 1) + layer_block = conv_block(out_channels, out_channels, 3, 1) + + inner_blocks.append(inner_block) + layer_blocks.append(layer_block) + + self.inner_blocks = nn.ModuleList(inner_blocks) + self.layer_blocks = nn.ModuleList(layer_blocks) + + def forward(self, x: Tensor) -> Tuple[Tensor]: + # process the last lowest resolution feat and + # first feed it into 1 x 1 conv + last_inner = self.inner_blocks[-1](x[-1]) + results = [self.layer_blocks[-1](last_inner)] + + for feature, inner_block, layer_block in zip( + x[:-1][::-1], self.inner_blocks[:-1][::-1], + self.layer_blocks[:-1][::-1]): + if not inner_block: + continue + inner_top_down = F.interpolate( + last_inner, scale_factor=2, mode='nearest') + inner_lateral = inner_block(feature) + last_inner = inner_lateral + inner_top_down + results.insert(0, layer_block(last_inner)) + + return tuple(results) diff --git a/mmaction/models/localizers/drn/drn_utils/__init__.py b/mmaction/models/localizers/drn/drn_utils/__init__.py new file mode 100644 index 0000000000..4d371a5055 --- /dev/null +++ b/mmaction/models/localizers/drn/drn_utils/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .backbone import Backbone +from .fcos import FCOSModule +from .FPN import FPN +from .language_module import QueryEncoder + +__all__ = ['Backbone', 'FPN', 'QueryEncoder', 'FCOSModule'] diff --git a/mmaction/models/localizers/drn/drn_utils/backbone.py b/mmaction/models/localizers/drn/drn_utils/backbone.py new file mode 100644 index 0000000000..ac2c6338d0 --- /dev/null +++ b/mmaction/models/localizers/drn/drn_utils/backbone.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +import torch +from torch import Tensor, nn + + +def conv_block(in_channels: int, + out_channels: int, + kernel_size: int = 3, + stride: int = 1) -> nn.Module: + module = nn.Sequential( + nn.Conv1d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + bias=False), nn.BatchNorm1d(out_channels), nn.ReLU()) + return module + + +class Backbone(nn.Module): + + def __init__(self, channels_list: List[tuple]) -> None: + super(Backbone, self).__init__() + + self.num_layers = len(channels_list) + layers = [] + for idx, channels_config in enumerate(channels_list): + layer = conv_block(*channels_config) + layers.append(layer) + self.layers = nn.ModuleList(layers) + + def forward(self, x: Tensor, query_fts: Tensor, + position_fts: Tensor) -> Tuple[Tensor]: + results = [] + + for idx in range(self.num_layers): + query_ft = query_fts[idx].unsqueeze(1).permute(0, 2, 1) + position_ft = position_fts[idx] + x = query_ft * x + if idx == 0: + x = torch.cat([x, position_ft], dim=1) + x = self.layers[idx](x) + results.append(x) + + return tuple(results) diff --git a/mmaction/models/localizers/drn/drn_utils/fcos.py b/mmaction/models/localizers/drn/drn_utils/fcos.py new file mode 100644 index 0000000000..33b30c4cb1 --- /dev/null +++ b/mmaction/models/localizers/drn/drn_utils/fcos.py @@ -0,0 +1,192 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import math + +import torch +from torch import nn + +from .inference import make_fcos_postprocessor +from .loss import make_fcos_loss_evaluator + + +class Scale(nn.Module): + + def __init__(self, init_value=1.0): + super(Scale, self).__init__() + self.scale = nn.Parameter(torch.FloatTensor([init_value])) + + def forward(self, x): + return x * self.scale + + +class FCOSHead(torch.nn.Module): + + def __init__(self, in_channels: int, fcos_num_class: int, + fcos_conv_layers: int, fcos_prior_prob: float, + is_second_stage: bool) -> None: + super(FCOSHead, self).__init__() + num_classes = fcos_num_class - 1 + + cls_tower = [] + bbox_tower = [] + for i in range(fcos_conv_layers): + cls_tower.append( + nn.Conv1d( + in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1)) + cls_tower.append(nn.BatchNorm1d(in_channels)) + cls_tower.append(nn.ReLU()) + bbox_tower.append( + nn.Conv1d( + in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1)) + bbox_tower.append(nn.BatchNorm1d(in_channels)) + bbox_tower.append(nn.ReLU()) + + self.cls_tower = nn.Sequential(*cls_tower) + self.bbox_tower = nn.Sequential(*bbox_tower) + self.cls_logits = nn.Conv1d( + in_channels, num_classes, kernel_size=3, stride=1, padding=1) + + self.bbox_pred = nn.Conv1d( + in_channels, 2, kernel_size=3, stride=1, padding=1) + + self.mix_fc = nn.Sequential( + nn.Conv1d(2 * in_channels, in_channels, kernel_size=1, stride=1), + nn.BatchNorm1d(in_channels), nn.ReLU()) + + self.iou_scores = nn.Sequential( + nn.Conv1d( + in_channels, + in_channels // 2, + kernel_size=3, + stride=1, + padding=1), + nn.BatchNorm1d(in_channels // 2), + nn.ReLU(), + nn.Conv1d(in_channels // 2, 1, kernel_size=1, stride=1), + ) + + # initialization + for module in self.modules(): + if isinstance(module, nn.Conv1d): + torch.nn.init.normal_(module.weight, std=0.01) + torch.nn.init.constant_(module.bias, 0) + + # initialize the bias for focal loss + bias_value = -math.log((1 - fcos_prior_prob) / fcos_prior_prob) + torch.nn.init.constant_(self.cls_logits.bias, bias_value) + + self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(3)]) + self.is_second_stage = is_second_stage + + def forward(self, x): + logits = [] + bbox_reg = [] + iou_scores = [] + for idx, feature in enumerate(x): + cls_tower = self.cls_tower(feature) + box_tower = self.bbox_tower(feature) + logits.append(self.cls_logits(cls_tower)) + + bbox_reg_ = torch.exp(self.scales[idx](self.bbox_pred(box_tower))) + if self.is_second_stage: + bbox_reg_ = bbox_reg_.detach() + bbox_reg.append(bbox_reg_) + + mix_feature = torch.cat([cls_tower, box_tower], dim=1) + if self.is_second_stage: + mix_feature = mix_feature.detach() + mix_feature = self.mix_fc(mix_feature) + iou_scores.append(self.iou_scores(mix_feature)) + return logits, bbox_reg, iou_scores + + +class FCOSModule(torch.nn.Module): + + def __init__(self, in_channels: int, fcos_num_class: int, + fcos_conv_layers: int, fcos_prior_prob: float, + fcos_inference_thr: float, fcos_pre_nms_top_n: int, + fcos_nms_thr: float, test_detections_per_img: int, + fpn_stride: int, focal_alpha: float, focal_gamma: float, + is_first_stage: bool, is_second_stage: bool) -> None: + super(FCOSModule, self).__init__() + + head = FCOSHead( + in_channels=in_channels, + fcos_num_class=fcos_num_class, + fcos_conv_layers=fcos_conv_layers, + fcos_prior_prob=fcos_prior_prob, + is_second_stage=is_second_stage) + + self.is_first_stage = is_first_stage + self.is_second_stage = is_second_stage + box_selector_test = 
make_fcos_postprocessor(fcos_num_class, + fcos_inference_thr, + fcos_pre_nms_top_n, + fcos_nms_thr, + test_detections_per_img, + is_first_stage) + loss_evaluator = make_fcos_loss_evaluator(focal_alpha, focal_gamma) + self.head = head + self.box_selector_test = box_selector_test + self.loss_evaluator = loss_evaluator + self.fpn_strides = fpn_stride + + def forward(self, features, targets=None): + box_cls, box_regression, iou_scores = self.head(features) + locations = self.compute_locations(features) + + if self.training: + return self._forward_train(locations, box_cls, box_regression, + targets, iou_scores) + else: + return self._forward_test(locations, box_cls, box_regression, + targets, iou_scores) + + def _forward_train(self, locations, box_cls, box_regression, targets, + iou_scores): + loss_box_cls, loss_box_reg, loss_iou = self.loss_evaluator( + locations, box_cls, box_regression, targets, iou_scores, + self.is_first_stage) + + if self.is_second_stage: + loss_box_cls = loss_box_cls.detach() + loss_box_reg = loss_box_reg.detach() + if self.is_first_stage: + loss_iou = loss_iou.detach() + + losses = { + 'loss_cls': loss_box_cls, + 'loss_reg': loss_box_reg, + 'loss_iou': loss_iou + } + return None, losses + + def _forward_test(self, locations, box_cls, box_regression, targets, + iou_scores): + boxes = self.box_selector_test(locations, box_cls, box_regression, + iou_scores) + losses = None + return boxes, losses + + def compute_locations(self, features): + locations = [] + for level, feature in enumerate(features): + t = feature.size(-1) + locations_per_level = self.compute_locations_per_level( + t, self.fpn_strides[level], feature.device) + locations.append(locations_per_level) + return locations + + def compute_locations_per_level(self, t, stride, device): + shifts_t = torch.arange( + 0, t * stride, step=stride, dtype=torch.float32, device=device) + shifts_t = shifts_t.reshape(-1) + locations = shifts_t + stride / 2 + return locations diff --git a/mmaction/models/localizers/drn/drn_utils/inference.py b/mmaction/models/localizers/drn/drn_utils/inference.py new file mode 100644 index 0000000000..09cc7ef989 --- /dev/null +++ b/mmaction/models/localizers/drn/drn_utils/inference.py @@ -0,0 +1,212 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Copied from https://github.com/Alvin-Zeng/DRN/""" + +import torch + + +class FCOSPostProcessor(torch.nn.Module): + """Performs post-processing on the outputs of the RetinaNet boxes. + + This is only used in the testing. 
+ """ + + def __init__(self, pre_nms_thresh, pre_nms_top_n, nms_thresh, + fpn_post_nms_top_n, min_size, num_classes, is_first_stage): + """ + Arguments: + pre_nms_thresh (float) + pre_nms_top_n (int) + nms_thresh (float) + fpn_post_nms_top_n (int) + min_size (int) + num_classes (int) + box_coder (BoxCoder) + """ + super(FCOSPostProcessor, self).__init__() + self.pre_nms_thresh = pre_nms_thresh + self.pre_nms_top_n = pre_nms_top_n + self.nms_thresh = nms_thresh + self.fpn_post_nms_top_n = fpn_post_nms_top_n + self.min_size = min_size + self.num_classes = num_classes + self.innerness_threshold = 0.15 + self.downsample_scale = 32 + self.is_first_stage = is_first_stage + + def forward_for_single_feature_map(self, locations, box_cls, + box_regression, level, iou_scores): + """ + Arguments: + anchors: list[BoxList] + box_cls: tensor of size N, A * C, H, W + box_regression: tensor of size N, A * 4, H, W + """ + N, C, T = box_cls.shape + + # put in the same format as locations + box_cls = box_cls.permute(0, 2, 1).contiguous().sigmoid() + iou_scores = iou_scores.permute(0, 2, 1).contiguous().sigmoid() + box_regression = box_regression.permute(0, 2, 1) + + # centerness = centerness.permute(0, 2, 1) + # centerness = centerness.reshape(N, -1).sigmoid() + # inner = inner.squeeze().sigmoid() + + candidate_inds = (box_cls > self.pre_nms_thresh) + pre_nms_top_n = candidate_inds.view(N, -1).sum(1) + pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n) + + # multiply the classification scores with centerness scores + # box_cls = box_cls * centerness[:, :, None] + # box_cls = box_cls + centerness[:, :, None] + if not self.is_first_stage: + box_cls = box_cls * iou_scores + + results = [] + for i in range(N): + + # per_centerness = centerness[i] + + per_box_cls = box_cls[i] + per_candidate_inds = candidate_inds[i] + per_box_cls = per_box_cls[per_candidate_inds] + + per_candidate_nonzeros = per_candidate_inds.nonzero() + per_box_loc = per_candidate_nonzeros[:, 0] + per_class = per_candidate_nonzeros[:, 1] + 1 + + per_box_regression = box_regression[i] + per_box_regression = per_box_regression[per_box_loc] + per_locations = locations[per_box_loc] + + # per_centerness = per_centerness[per_box_loc] + + per_pre_nms_top_n = pre_nms_top_n[i] + + if per_candidate_inds.sum().item() > per_pre_nms_top_n.item(): + per_box_cls, top_k_indices = \ + per_box_cls.topk(per_pre_nms_top_n, sorted=False) + per_class = per_class[top_k_indices] + per_box_regression = per_box_regression[top_k_indices] + per_locations = per_locations[top_k_indices] + + # per_centerness = per_centerness[top_k_indices] + + detections = torch.stack([ + per_locations - per_box_regression[:, 0], + per_locations + per_box_regression[:, 1], + ], + dim=1) / self.downsample_scale + + detections[:, 0].clamp_(min=0, max=1) + detections[:, 1].clamp_(min=0, max=1) + + # remove small boxes + p_start, p_end = detections.unbind(dim=1) + duration = p_end - p_start + keep = (duration >= self.min_size).nonzero().squeeze(1) + detections = detections[keep] + + temp_dict = {} + temp_dict['detections'] = detections + temp_dict['labels'] = per_class + temp_dict['scores'] = torch.sqrt(per_box_cls) + temp_dict['level'] = [level] + # temp_dict['centerness'] = per_centerness + temp_dict['locations'] = per_locations / 32 + + results.append(temp_dict) + + return results + + def forward(self, locations, box_cls, box_regression, iou_scores): + """ + Arguments: + anchors: list[list[BoxList]] + box_cls: list[tensor] + box_regression: list[tensor] + image_sizes: list[(h, w)] + 
Returns: + boxlists (list[BoxList]): the post-processed anchors, after + applying box decoding and NMS + """ + sampled_boxes = [] + for i, (l, o, b, iou_s) in enumerate( + zip(locations, box_cls, box_regression, iou_scores)): + sampled_boxes.append( + self.forward_for_single_feature_map(l, o, b, i, iou_s)) + + boxlists = list(zip(*sampled_boxes)) + # boxlists = [cat_boxlist(boxlist) for boxlist in boxlists] + boxlists = self.select_over_all_levels(boxlists) + + return boxlists + + # TODO very similar to filter_results from PostProcessor + # but filter_results is per image + # TODO Yang: solve this issue in the future. No good solution + # right now. + def select_over_all_levels(self, boxlists): + num_images = len(boxlists) + results = [] + for i in range(num_images): + dicts = boxlists[i] + per_vid_scores = [] + per_vid_detections = [] + per_vid_labels = [] + # add level number + per_vid_level = [] + per_vid_locations = [] + # per_vid_centerness = [] + for per_scale_dict in dicts: + if len(per_scale_dict['detections']) != 0: + per_vid_detections.append(per_scale_dict['detections']) + if len(per_scale_dict['scores']) != 0: + per_vid_scores.append(per_scale_dict['scores']) + if len(per_scale_dict['level']) != 0: + per_vid_level.append(per_scale_dict['level'] * + len(per_scale_dict['detections'])) + + if len(per_scale_dict['locations']) != 0: + per_vid_locations.append(per_scale_dict['locations']) + + # if len(per_scale_dict['centerness']) != 0: + # per_vid_centerness.append(per_scale_dict['centerness']) + if len(per_vid_detections) == 0: + per_vid_detections = torch.Tensor([0, 1]).unsqueeze(0) + per_vid_scores = torch.Tensor([1]) + per_vid_level = [[-1]] + per_vid_locations = torch.Tensor([0.5]) + # per_vid_centerness = torch.Tensor([0.5]).cuda() + else: + per_vid_detections = torch.cat(per_vid_detections, dim=0) + per_vid_scores = torch.cat(per_vid_scores, dim=0) + per_vid_level = per_vid_level + per_vid_locations = torch.cat(per_vid_locations, dim=0) + # per_vid_centerness = torch.cat(per_vid_centerness, dim=0) + + temp_dict = {} + temp_dict['detections'] = per_vid_detections + temp_dict['labels'] = per_vid_labels + temp_dict['scores'] = per_vid_scores + temp_dict['level'] = per_vid_level + # temp_dict['centerness'] = per_vid_centerness + temp_dict['locations'] = per_vid_locations + results.append(temp_dict) + + return results + + +def make_fcos_postprocessor(fcos_num_class, fcos_inference_thr, + fcos_pre_nms_top_n, fcos_nms_thr, + test_detections_per_img, is_first_stage): + box_selector = FCOSPostProcessor( + pre_nms_thresh=fcos_inference_thr, + pre_nms_top_n=fcos_pre_nms_top_n, + nms_thresh=fcos_nms_thr, + fpn_post_nms_top_n=test_detections_per_img, + min_size=0, + num_classes=fcos_num_class, + is_first_stage=is_first_stage) + + return box_selector diff --git a/mmaction/models/localizers/drn/drn_utils/language_module.py b/mmaction/models/localizers/drn/drn_utils/language_module.py new file mode 100644 index 0000000000..135652a5eb --- /dev/null +++ b/mmaction/models/localizers/drn/drn_utils/language_module.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List + +import torch +from torch import Tensor, nn +from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence + + +class QueryEncoder(nn.Module): + + def __init__(self, + vocab_size: int, + hidden_dim: int = 512, + embed_dim: int = 300, + num_layers: int = 1, + bidirection: bool = True) -> None: + super(QueryEncoder, self).__init__() + self.hidden_dim = hidden_dim + self.embed_dim = embed_dim + self.embedding = nn.Embedding( + num_embeddings=vocab_size + 1, + embedding_dim=embed_dim, + padding_idx=0) + # self.embedding.weight.data.copy_(torch.load('glove_weights')) + self.biLSTM = nn.LSTM( + input_size=embed_dim, + hidden_size=self.hidden_dim, + num_layers=num_layers, + dropout=0.0, + batch_first=True, + bidirectional=bidirection) + + self.W3 = nn.Linear(hidden_dim * 4, hidden_dim) + self.W2 = nn.ModuleList( + [nn.Linear(hidden_dim, hidden_dim * 2) for _ in range(3)]) + self.W1 = nn.Linear(hidden_dim * 2, 1) + + def extract_textual(self, q_encoding: Tensor, lstm_outputs: Tensor, + q_length: Tensor, t: int): + q_cmd = self.W3(q_encoding).relu() + q_cmd = self.W2[t](q_cmd) + q_cmd = q_cmd[:, None, :] * lstm_outputs + raw_att = self.W1(q_cmd).squeeze(-1) + + raw_att = apply_mask1d(raw_att, q_length) + att = raw_att.softmax(dim=-1) + cmd = torch.bmm(att[:, None, :], lstm_outputs).squeeze(1) + return cmd + + def forward(self, query_tokens: Tensor, + query_length: Tensor) -> List[Tensor]: + self.biLSTM.flatten_parameters() + + query_embedding = self.embedding(query_tokens) + + # output denotes the forward and backward hidden states in Eq 2. + query_embedding = pack_padded_sequence( + query_embedding, query_length.cpu(), batch_first=True) + output, _ = self.biLSTM(query_embedding) + output, _ = pad_packed_sequence(output, batch_first=True) + + # q_vector denotes the global representation `g` in Eq 2. + q_vector_list = [] + + for i, length in enumerate(query_length): + h1 = output[i][0] + hs = output[i][length - 1] + q_vector = torch.cat((h1, hs), dim=-1) + q_vector_list.append(q_vector) + q_vector = torch.stack(q_vector_list) + # outputs denotes the query feature in Eq3 in 3 levels. + outputs = [] + for cmd_t in range(3): + query_feat = self.extract_textual(q_vector, output, query_length, + cmd_t) + outputs.append(query_feat) + + # Note: the output here is zero-padded + # we need slice the non-zero items for the following operations. + return outputs + + +def apply_mask1d(attention: Tensor, image_locs: Tensor) -> Tensor: + batch_size, num_loc = attention.size() + tmp1 = torch.arange( + num_loc, dtype=attention.dtype, device=attention.device) + tmp1 = tmp1.expand(batch_size, num_loc) + + tmp2 = image_locs.unsqueeze(dim=1).expand(batch_size, num_loc) + mask = tmp1 >= tmp2.to(tmp1.dtype) + attention = attention.masked_fill(mask, -1e30) + return attention diff --git a/mmaction/models/localizers/drn/drn_utils/loss.py b/mmaction/models/localizers/drn/drn_utils/loss.py new file mode 100644 index 0000000000..920ebac0b3 --- /dev/null +++ b/mmaction/models/localizers/drn/drn_utils/loss.py @@ -0,0 +1,240 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+"""Adapted from https://github.com/Alvin-Zeng/DRN/""" + +import torch +import torchvision +from torch import nn + +INF = 100000000 + + +def SigmoidFocalLoss(alpha, gamma): + + def loss_fn(inputs, targets): + loss = torchvision.ops.sigmoid_focal_loss( + inputs=inputs, + targets=targets, + alpha=alpha, + gamma=gamma, + reduction='sum') + return loss + + return loss_fn + + +def IOULoss(): + + def loss_fn(pred, target): + pred_left = pred[:, 0] + pred_right = pred[:, 1] + + target_left = target[:, 0] + target_right = target[:, 1] + + intersect = torch.min(pred_right, target_right) + torch.min( + pred_left, target_left) + target_area = target_left + target_right + pred_area = pred_left + pred_right + union = target_area + pred_area - intersect + + losses = -torch.log((intersect + 1e-8) / (union + 1e-8)) + return losses.mean() + + return loss_fn + + +class FCOSLossComputation(object): + """This class computes the FCOS losses.""" + + def __init__(self, focal_alpha, focal_gamma): + self.cls_loss_fn = SigmoidFocalLoss(focal_alpha, focal_gamma) + self.box_reg_loss_fn = IOULoss() + self.centerness_loss_fn = nn.BCEWithLogitsLoss() + self.iou_loss_fn = nn.SmoothL1Loss() + + def prepare_targets(self, points, targets): + object_sizes_of_interest = [ + [-1, 6], + [5.6, 11], + [11, INF], + ] + expanded_object_sizes_of_interest = [] + for idx, points_per_level in enumerate(points): + object_sizes_of_interest_per_level = \ + points_per_level.new_tensor(object_sizes_of_interest[idx]) + expanded_object_sizes_of_interest.append( + object_sizes_of_interest_per_level[None].expand( + len(points_per_level), -1)) + + expanded_object_sizes_of_interest = torch.cat( + expanded_object_sizes_of_interest, dim=0) + num_points_per_level = [ + len(points_per_level) for points_per_level in points + ] + points_all_level = torch.cat(points, dim=0) + labels, reg_targets = self.compute_targets_for_locations( + points_all_level, targets, expanded_object_sizes_of_interest) + + for i in range(len(labels)): + labels[i] = torch.split(labels[i], num_points_per_level, dim=0) + reg_targets[i] = torch.split( + reg_targets[i], num_points_per_level, dim=0) + + labels_level_first = [] + reg_targets_level_first = [] + for level in range(len(points)): + labels_level_first.append( + torch.cat([labels_per_im[level] for labels_per_im in labels], + dim=0)) + reg_targets_level_first.append( + torch.cat([ + reg_targets_per_im[level] + for reg_targets_per_im in reg_targets + ], + dim=0)) + + return labels_level_first, reg_targets_level_first + + def compute_targets_for_locations(self, locations, targets, + object_sizes_of_interest): + labels = [] + reg_targets = [] + ts = locations + + for im_i in range(len(targets)): + targets_per_im = targets[im_i] + bboxes = targets_per_im * 32 + + left = ts[:, None] - bboxes[None, 0] + right = bboxes[None, 1] - ts[:, None] + reg_targets_per_im = torch.cat([left, right], dim=1) + + is_in_boxes = reg_targets_per_im.min(dim=1)[0] > 0 + max_reg_targets_per_im = reg_targets_per_im.max(dim=1)[0] + is_cared_in_the_level = \ + (max_reg_targets_per_im >= object_sizes_of_interest[:, 0]) & \ + (max_reg_targets_per_im <= object_sizes_of_interest[:, 1]) + + locations_to_gt_area = bboxes[1] - bboxes[0] + locations_to_gt_area = locations_to_gt_area.repeat( + len(locations), 1) + locations_to_gt_area[is_in_boxes == 0] = INF + locations_to_gt_area[is_cared_in_the_level == 0] = INF + + _ = locations_to_gt_area.min(dim=1) + locations_to_min_area, locations_to_gt_inds = _ + + labels_per_im = reg_targets_per_im.new_ones( + 
len(reg_targets_per_im)) + labels_per_im[locations_to_min_area == INF] = 0 + + labels.append(labels_per_im) + reg_targets.append(reg_targets_per_im) + + return labels, reg_targets + + def __call__(self, + locations, + box_cls, + box_regression, + targets, + iou_scores, + is_first_stage=True): + N = box_cls[0].size(0) + num_classes = box_cls[0].size(1) + labels, reg_targets = self.prepare_targets(locations, targets) + + box_cls_flatten = [] + box_regression_flatten = [] + # centerness_flatten = [] + labels_flatten = [] + reg_targets_flatten = [] + + for idx in range(len(labels)): + box_cls_flatten.append(box_cls[idx].permute(0, 2, 1).reshape( + -1, num_classes)) + box_regression_flatten.append(box_regression[idx].permute( + 0, 2, 1).reshape(-1, 2)) + labels_flatten.append(labels[idx].reshape(-1)) + reg_targets_flatten.append(reg_targets[idx].reshape(-1, 2)) + + if not is_first_stage: + # [batch, 56, 2] + merged_box_regression = torch.cat( + box_regression, dim=-1).transpose(2, 1) + # [56] + merged_locations = torch.cat(locations, dim=0) + # [batch, 56] + full_locations = merged_locations[None, :].expand( + merged_box_regression.size(0), -1).contiguous() + pred_start = full_locations - merged_box_regression[:, :, 0] + pred_end = full_locations + merged_box_regression[:, :, 1] + # [batch, 56, 2] + predictions = torch.cat( + [pred_start.unsqueeze(-1), + pred_end.unsqueeze(-1)], dim=-1) / 32 + # TODO: make sure the predictions are legal. (e.g. start < end) + predictions.clamp_(min=0, max=1) + # gt: [batch, 2] + gt_box = targets[:, None, :] + + iou_target = segment_tiou(predictions, gt_box) + iou_pred = torch.cat(iou_scores, dim=-1).squeeze().sigmoid() + iou_pos_ind = iou_target > 0.9 + pos_iou_target = iou_target[iou_pos_ind] + + pos_iou_pred = iou_pred[iou_pos_ind] + + if iou_pos_ind.sum().item() == 0: + iou_loss = torch.tensor([0.]).to(iou_pos_ind.device) + else: + iou_loss = self.iou_loss_fn(pos_iou_pred, pos_iou_target) + + box_cls_flatten = torch.cat(box_cls_flatten, dim=0) + box_regression_flatten = torch.cat(box_regression_flatten, dim=0) + labels_flatten = torch.cat(labels_flatten, dim=0) + reg_targets_flatten = torch.cat(reg_targets_flatten, dim=0) + + pos_inds = torch.nonzero(labels_flatten > 0).squeeze(1) + cls_loss = self.cls_loss_fn( + box_cls_flatten, labels_flatten.unsqueeze(1)) / ( + pos_inds.numel() + N) # add N to avoid dividing by a zero + + box_regression_flatten = box_regression_flatten[pos_inds] + reg_targets_flatten = reg_targets_flatten[pos_inds] + + if pos_inds.numel() > 0: + reg_loss = self.box_reg_loss_fn( + box_regression_flatten, + reg_targets_flatten, + ) + else: + reg_loss = box_regression_flatten.sum() + + if not is_first_stage: + return cls_loss, reg_loss, iou_loss + + return cls_loss, reg_loss, torch.tensor([0.]).to(cls_loss.device) + + +def segment_tiou(box_a, box_b): + + # gt: [batch, 1, 2], detections: [batch, 56, 2] + # calculate interaction + inter_max_xy = torch.min(box_a[:, :, -1], box_b[:, :, -1]) + inter_min_xy = torch.max(box_a[:, :, 0], box_b[:, :, 0]) + inter = torch.clamp((inter_max_xy - inter_min_xy), min=0) + + # calculate union + union_max_xy = torch.max(box_a[:, :, -1], box_b[:, :, -1]) + union_min_xy = torch.min(box_a[:, :, 0], box_b[:, :, 0]) + union = torch.clamp((union_max_xy - union_min_xy), min=0) + + iou = inter / (union + 1e-6) + + return iou + + +def make_fcos_loss_evaluator(focal_alpha, focal_gamma): + loss_evaluator = FCOSLossComputation(focal_alpha, focal_gamma) + return loss_evaluator diff --git a/tools/data/charades-sta/README.md 
b/tools/data/charades-sta/README.md
new file mode 100644
index 0000000000..b2bea83d2b
--- /dev/null
+++ b/tools/data/charades-sta/README.md
@@ -0,0 +1,59 @@
+# Preparing Charades-STA
+
+## Introduction
+
+
+
+```BibTeX
+@inproceedings{gao2017tall,
+  title={Tall: Temporal activity localization via language query},
+  author={Gao, Jiyang and Sun, Chen and Yang, Zhenheng and Nevatia, Ram},
+  booktitle={Proceedings of the IEEE international conference on computer vision},
+  pages={5267--5275},
+  year={2017}
+}
+
+@inproceedings{DRN2020CVPR,
+  author = {Runhao, Zeng and Haoming, Xu and Wenbing, Huang and Peihao, Chen and Mingkui, Tan and Chuang Gan},
+  title = {Dense Regression Network for Video Grounding},
+  booktitle = {CVPR},
+  year = {2020},
+}
+```
+
+Charades-STA is a new dataset built on top of Charades by adding sentence temporal annotations. It was introduced by Gao et al. in `TALL: Temporal Activity Localization via Language Query`. Currently, we only support C3D features from `Dense Regression Network for Video Grounding`.
+
+## Step 1. Prepare Annotations
+
+First of all, you can run the following script to prepare annotations from the official repository of DRN:
+
+```shell
+bash download_annotations.sh
+```
+
+## Step 2. Prepare C3D features
+
+After the first step, you should be at `${MMACTION2}/data/CharadesSTA/`. Download the C3D features following the [official command](https://github.com/Alvin-Zeng/DRN/tree/master#download-features) to the current directory `${MMACTION2}/data/CharadesSTA/`.
+
+After finishing the two steps, the folder structure will look like:
+
+```
+mmaction2
+├── mmaction
+├── tools
+├── configs
+├── data
+│   ├── CharadesSTA
+│   │   ├── C3D_unit16_overlap0.5_merged
+│   │   |   ├── 001YG.pt
+│   │   |   ├── 003WS.pt
+│   │   |   ├── 004QE.pt
+│   │   |   ├── 00607.pt
+│   │   |   ├── ...
+│   │   ├── Charades_duration.json
+│   │   ├── Charades_fps_dict.json
+│   │   ├── Charades_frames_info.json
+│   │   ├── Charades_sta_test.txt
+│   │   ├── Charades_sta_train.txt
+│   │   ├── Charades_word2id.json
+```
diff --git a/tools/data/charades-sta/download_annotations.sh b/tools/data/charades-sta/download_annotations.sh
new file mode 100644
index 0000000000..85bdb7d1a8
--- /dev/null
+++ b/tools/data/charades-sta/download_annotations.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+DATA_DIR="../../../data/CharadesSTA/"
+
+if [[ ! -d "${DATA_DIR}" ]]; then
+  echo "${DATA_DIR} does not exist.
Creating"; + mkdir -p ${DATA_DIR} +fi + +cd ${DATA_DIR} + +URL="https://raw.githubusercontent.com/Alvin-Zeng/DRN/master/data/dataset/Charades" +wget ${URL}/Charades_frames_info.json +wget ${URL}/Charades_duration.json +wget ${URL}/Charades_fps_dict.json +wget ${URL}/Charades_sta_test.txt +wget ${URL}/Charades_sta_train.txt +wget ${URL}/Charades_word2id.json From 82c7d01929ce5d9a36903c9e4ea112b5191a52d0 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Wed, 11 Oct 2023 23:55:37 -0400 Subject: [PATCH 22/24] add anet k700 slowonly feat (#2673) --- configs/localization/bmn/README.md | 9 +- ...00-9e_activitynet-slowonly-k700-feature.py | 110 ++++++++++++++++++ configs/localization/bsn/README.md | 7 +- ...0-20e_activitynet-slowonly-k700-feature.py | 84 +++++++++++++ ...8x100_activitynet-slowonly-k700-feature.py | 32 +++++ ...6-2048x100-20e_activitynet-k700-feature.py | 95 +++++++++++++++ mmaction/datasets/transforms/formatting.py | 13 +-- 7 files changed, 340 insertions(+), 10 deletions(-) create mode 100644 configs/localization/bmn/bmn_2xb8-2048x100-9e_activitynet-slowonly-k700-feature.py create mode 100644 configs/localization/bsn/bsn_pem_1xb16-2048x100-20e_activitynet-slowonly-k700-feature.py create mode 100644 configs/localization/bsn/bsn_pgm_2048x100_activitynet-slowonly-k700-feature.py create mode 100644 configs/localization/bsn/bsn_tem_1xb16-2048x100-20e_activitynet-k700-feature.py diff --git a/configs/localization/bmn/README.md b/configs/localization/bmn/README.md index ec2f625a95..f30b3a5d40 100644 --- a/configs/localization/bmn/README.md +++ b/configs/localization/bmn/README.md @@ -23,11 +23,12 @@ Temporal action proposal generation is an challenging and promising task which a | feature | gpus | pretrain | AUC | AR@1 | AR@5 | AR@10 | AR@100 | gpu_mem(M) | iter time(s) | config | ckpt | log | | :-----------: | :--: | :------: | :---: | :---: | :---: | :---: | :----: | :--------: | :----------: | :------------------------------------------: | :----------------------------------------: | :---------------------------------------: | | cuhk_mean_100 | 2 | None | 67.25 | 32.89 | 49.43 | 56.64 | 75.29 | 5412 | - | [config](/configs/localization/bmn/bmn_2xb8-400x100-9e_activitynet-feature.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/localization/bmn/bmn_2xb8-400x100-9e_activitynet-feature_20220908-79f92857.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/localization/bmn/bmn_2xb8-400x100-9e_activitynet-feature.log) | +| slowonly-k700 | 2 | None | 68.04 | 33.44 | 50.53 | 57.65 | 75.77 | - | - | [config](/configs/localization/bmn/bmn_2xb8-2048x100-9e_activitynet-slowonly-k700-feature.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/localization/bmn/bmn_2xb8-2048x100-9e_activitynet-slowonly-k700-feature_20230907-50b939b2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/localization/bmn/bmn_2xb8-2048x100-9e_activitynet-slowonly-k700-feature.log) | 1. The **gpus** indicates the number of gpu we used to get the checkpoint. According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. -2. For feature column, cuhk_mean_100 denotes the widely used cuhk activitynet feature extracted by [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk). +2. 
For feature column, cuhk_mean_100 denotes the widely used cuhk activitynet feature extracted by [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk). The slowonly-k700 denotes the feature extracted using MMAction2's [SlowOnly model trained on Kinetics 700](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py). You can download this feature from [ActivityNet Data Preparation](/tools/data/activitynet/README.md). 3. We evaluate the action detection performance of BMN, using [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) submission for ActivityNet2017 Untrimmed Video Classification Track to assign label for each action proposal. \*We train BMN with the [official repo](https://github.com/JJBOY/BMN-Boundary-Matching-Network), evaluate its proposal generation and action detection performance with [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) for label assigning. @@ -42,6 +43,12 @@ Train BMN model on ActivityNet features dataset. bash tools/dist_train.sh configs/localization/bmn/bmn_2xb8-400x100-9e_activitynet-feature.py 2 ``` +Train BMN model on ActivityNet SlowOnly-K700 features dataset. + +```shell +bash tools/dist_train.sh configs/localization/bmn/bmn_2xb8-2048x100-9e_activitynet-slowonly-k700-feature.py 2 +``` + For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/train_test.md). ## Test diff --git a/configs/localization/bmn/bmn_2xb8-2048x100-9e_activitynet-slowonly-k700-feature.py b/configs/localization/bmn/bmn_2xb8-2048x100-9e_activitynet-slowonly-k700-feature.py new file mode 100644 index 0000000000..9230578a86 --- /dev/null +++ b/configs/localization/bmn/bmn_2xb8-2048x100-9e_activitynet-slowonly-k700-feature.py @@ -0,0 +1,110 @@ +_base_ = [ + '../../_base_/models/bmn_400x100.py', '../../_base_/default_runtime.py' +] + +model = dict(feat_dim=2048) + +# dataset settings +dataset_type = 'ActivityNetDataset' +data_root = 'data/ActivityNet/k700slowonly' +data_root_val = 'data/ActivityNet/k700slowonly' +ann_file_train = 'data/ActivityNet/anet_anno_train.json' +ann_file_val = 'data/ActivityNet/anet_anno_val.json' +ann_file_test = 'data/ActivityNet/anet_anno_val.json' + +train_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict(type='GenerateLocalizationLabels'), + dict( + type='PackLocalizationInputs', + keys=('gt_bbox', ), + meta_keys=('video_name', )) +] + +val_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict(type='GenerateLocalizationLabels'), + dict( + type='PackLocalizationInputs', + keys=('gt_bbox', ), + meta_keys=('video_name', 'duration_second', 'duration_frame', + 'annotations', 'feature_frame')) +] + +test_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict( + type='PackLocalizationInputs', + keys=('gt_bbox', ), + meta_keys=('video_name', 'duration_second', 'duration_frame', + 'annotations', 'feature_frame')) +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + drop_last=True, + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + 
pipeline=val_pipeline, + test_mode=True)) + +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +max_epochs = 9 +train_cfg = dict( + type='EpochBasedTrainLoop', + max_epochs=max_epochs, + val_begin=1, + val_interval=1) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +optim_wrapper = dict( + optimizer=dict(type='Adam', lr=0.001, weight_decay=0.0001), + clip_grad=dict(max_norm=40, norm_type=2)) + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=max_epochs, + by_epoch=True, + milestones=[ + 7, + ], + gamma=0.1) +] + +work_dir = './work_dirs/bmn_400x100_2x8_9e_activitynet_feature/' +test_evaluator = dict( + type='ANetMetric', + metric_type='AR@AN', + dump_config=dict(out=f'{work_dir}/results.json', output_format='json')) +val_evaluator = test_evaluator diff --git a/configs/localization/bsn/README.md b/configs/localization/bsn/README.md index efd2d2c0d0..da52d1375d 100644 --- a/configs/localization/bsn/README.md +++ b/configs/localization/bsn/README.md @@ -23,17 +23,20 @@ Temporal action proposal generation is an important yet challenging problem, sin | feature | gpus | pretrain | AUC | AR@1 | AR@5 | AR@10 | AR@100 | gpu_mem(M) | iter time(s) | config | ckpt | log | | :-----------: | :--: | :------: | :---: | :---: | :---: | :---: | :----: | :-------------: | :----------: | :----------------------------------------: | :--------------------------------------: | :--------------------------------------: | | cuhk_mean_100 | 1 | None | 66.26 | 32.71 | 48.43 | 55.28 | 74.27 | 43(TEM)+25(PEM) | - | [config_TEM](/configs/localization/bsn/bsn_tem_1xb16-400x100-20e_activitynet-feature.py) [config_PGM](/configs/localization/bsn/bsn_pgm_400x100_activitynet-feature.py) [config_PEM](/configs/localization/bsn/bsn_pem_1xb16-400x100-20e_activitynet-feature.py) | [ckpt_TEM](https://download.openmmlab.com/mmaction/v1.0/localization/bsn/bsn_tem_1xb16-400x100-20e_activitynet-feature_20220908-9da79951.pth) [ckpt_PEM](https://download.openmmlab.com/mmaction/v1.0/localization/bsn/bsn_pem_1xb16-400x100-20e_activitynet-feature_20220908-ec2eb21d.pth) | [log_tem](https://download.openmmlab.com/mmaction/v1.0/localization/bsn/bsn_tem_1xb16-400x100-20e_activitynet-feature.log) [log_pem](https://download.openmmlab.com/mmaction/v1.0/localization/bsn/bsn_pem_1xb16-400x100-20e_activitynet-feature.log) | +| slowonly-k700 | 1 | None | 67.63 | 33.04 | 48.79 | 56.01 | 75.74 | - | - | [config_TEM](/configs/localization/bsn/bsn_tem_1xb16-2048x100-20e_activitynet-k700-feature.py) [config_PGM](/configs/localization/bsn/bsn_pgm_2048x100_activitynet-slowonly-k700-feature.py) [config_PEM](/configs/localization/bsn/bsn_pem_1xb16-2048x100-20e_activitynet-slowonly-k700-feature.py) | [ckpt_TEM](https://download.openmmlab.com/mmaction/v1.0/localization/bsn/bsn_tem_1xb16-2048x100-20e_activitynet-k700-feature_20230907-76069fda.pth) [ckpt_PEM](https://download.openmmlab.com/mmaction/v1.0/localization/bsn/bsn_pem_1xb16-2048x100-20e_activitynet-slowonly-k700-feature_20230907-44158b6d.pth) | [log_tem](https://download.openmmlab.com/mmaction/v1.0/localization/bsn/bsn_tem_1xb16-400x100-20e_activitynet-feature.log) [log_pem](https://download.openmmlab.com/mmaction/v1.0/localization/bsn/bsn_pem_1xb16-400x100-20e_activitynet-feature.log) | 1. 
The **gpus** indicates the number of gpu we used to get the checkpoint. According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. -2. For feature column, cuhk_mean_100 denotes the widely used cuhk activitynet feature extracted by [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk). +2. For feature column, cuhk_mean_100 denotes the widely used cuhk activitynet feature extracted by [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk). The slowonly-k700 denotes the feature extracted using MMAction2's [SlowOnly model trained on Kinetics 700](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py). You can download this feature from [ActivityNet Data Preparation](/tools/data/activitynet/README.md). For more details on data preparation, you can refer to [ActivityNet Data Preparation](/tools/data/activitynet/README.md). ## Training and Test -The traing of the BSN model is three-stages. Firstly train the Temporal evaluation module (TEM): +The traing of the BSN model is three-stages. We take the `cuhk_mean_100` feature as an example. For `slowonly-k700` feature, just need to replace the config file with the corresponding config file with `slowonly-k700` in the file name. + +Firstly train the Temporal evaluation module (TEM): ```shell python3 tools/train.py configs/localization/bsn/bsn_tem_1xb16-400x100-20e_activitynet-feature.py diff --git a/configs/localization/bsn/bsn_pem_1xb16-2048x100-20e_activitynet-slowonly-k700-feature.py b/configs/localization/bsn/bsn_pem_1xb16-2048x100-20e_activitynet-slowonly-k700-feature.py new file mode 100644 index 0000000000..25bb7df698 --- /dev/null +++ b/configs/localization/bsn/bsn_pem_1xb16-2048x100-20e_activitynet-slowonly-k700-feature.py @@ -0,0 +1,84 @@ +_base_ = [ + '../../_base_/models/bsn_pem.py', '../../_base_/schedules/adam_20e.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ActivityNetDataset' +data_root = 'data/ActivityNet/k700slowonly' +data_root_val = 'data/ActivityNet/k700slowonly' +ann_file_train = 'data/ActivityNet/anet_anno_train.json' +ann_file_val = 'data/ActivityNet/anet_anno_val.json' +ann_file_test = 'data/ActivityNet/anet_anno_val.json' + +work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/' +pgm_proposals_dir = f'{work_dir}/pgm_proposals/' +pgm_features_dir = f'{work_dir}/pgm_features/' + +train_pipeline = [ + dict( + type='LoadProposals', + top_k=500, + pgm_proposals_dir=pgm_proposals_dir, + pgm_features_dir=pgm_features_dir), + dict( + type='PackLocalizationInputs', + keys=('reference_temporal_iou', 'bsp_feature'), + meta_keys=()) +] +val_pipeline = [ + dict( + type='LoadProposals', + top_k=1000, + pgm_proposals_dir=pgm_proposals_dir, + pgm_features_dir=pgm_features_dir), + dict( + type='PackLocalizationInputs', + keys=('tmin', 'tmax', 'tmin_score', 'tmax_score', 'bsp_feature'), + meta_keys=('video_name', 'duration_second', 'duration_frame', + 'annotations', 'feature_frame')), +] +test_pipeline = val_pipeline + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=1, + num_workers=8, + 
persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) + +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +train_cfg = dict(val_interval=20) + +test_evaluator = dict( + type='ANetMetric', + metric_type='AR@AN', + dump_config=dict(out=f'{work_dir}/results.json', output_format='json')) +val_evaluator = test_evaluator diff --git a/configs/localization/bsn/bsn_pgm_2048x100_activitynet-slowonly-k700-feature.py b/configs/localization/bsn/bsn_pgm_2048x100_activitynet-slowonly-k700-feature.py new file mode 100644 index 0000000000..544bc12a2e --- /dev/null +++ b/configs/localization/bsn/bsn_pgm_2048x100_activitynet-slowonly-k700-feature.py @@ -0,0 +1,32 @@ +# dataset settings +dataset_type = 'ActivityNetDataset' +data_root = 'data/ActivityNet/k700slowonly' +data_root_val = 'data/ActivityNet/k700slowonly' +ann_file_train = 'data/ActivityNet/anet_anno_train.json' +ann_file_val = 'data/ActivityNet/anet_anno_val.json' +ann_file_test = 'data/ActivityNet/anet_anno_test.json' + +work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/' +tem_results_dir = f'{work_dir}/tem_results/' +pgm_proposals_dir = f'{work_dir}/pgm_proposals/' +pgm_features_dir = f'{work_dir}/pgm_features/' + +temporal_scale = 100 +pgm_proposals_cfg = dict( + pgm_proposals_thread=8, temporal_scale=temporal_scale, peak_threshold=0.5) +pgm_features_test_cfg = dict( + pgm_features_thread=32, + top_k=1000, + num_sample_start=8, + num_sample_end=8, + num_sample_action=16, + num_sample_interp=3, + bsp_boundary_ratio=0.2) +pgm_features_train_cfg = dict( + pgm_features_thread=32, + top_k=500, + num_sample_start=8, + num_sample_end=8, + num_sample_action=16, + num_sample_interp=3, + bsp_boundary_ratio=0.2) diff --git a/configs/localization/bsn/bsn_tem_1xb16-2048x100-20e_activitynet-k700-feature.py b/configs/localization/bsn/bsn_tem_1xb16-2048x100-20e_activitynet-k700-feature.py new file mode 100644 index 0000000000..c4e5821e81 --- /dev/null +++ b/configs/localization/bsn/bsn_tem_1xb16-2048x100-20e_activitynet-k700-feature.py @@ -0,0 +1,95 @@ +_base_ = ['../../_base_/models/bsn_tem.py', '../../_base_/default_runtime.py'] + +model = dict(tem_feat_dim=2048) + +# dataset settings +dataset_type = 'ActivityNetDataset' +data_root = 'data/ActivityNet/k700slowonly' +data_root_val = 'data/ActivityNet/k700slowonly' +ann_file_train = 'data/ActivityNet/anet_anno_train.json' +ann_file_val = 'data/ActivityNet/anet_anno_val.json' +ann_file_test = 'data/ActivityNet/anet_anno_trainval.json' + +train_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict(type='GenerateLocalizationLabels'), + dict( + type='PackLocalizationInputs', + keys=('gt_bbox', ), + meta_keys=('video_name', )) +] +val_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict(type='GenerateLocalizationLabels'), + dict( + type='PackLocalizationInputs', + keys=('gt_bbox', ), + meta_keys=('video_name', )) +] +test_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict(type='PackLocalizationInputs', meta_keys=('video_name', )) +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + 
dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=20, val_begin=1, val_interval=20) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +optim_wrapper = dict( + optimizer=dict(type='Adam', lr=0.001, weight_decay=0.0001), + clip_grad=dict(max_norm=40, norm_type=2)) + +param_scheduler = [ + dict( + type='MultiStepLR', + begin=0, + end=20, + by_epoch=True, + milestones=[7, 14], + gamma=0.1) +] + +work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/' +tem_results_dir = f'{work_dir}/tem_results/' + +test_evaluator = dict( + type='ANetMetric', + metric_type='TEM', + dump_config=dict(out=tem_results_dir, output_format='csv')) +val_evaluator = test_evaluator + +default_hooks = dict(checkpoint=dict(filename_tmpl='tem_epoch_{}.pth')) diff --git a/mmaction/datasets/transforms/formatting.py b/mmaction/datasets/transforms/formatting.py index a8e9b9ab82..0ae1475c8b 100644 --- a/mmaction/datasets/transforms/formatting.py +++ b/mmaction/datasets/transforms/formatting.py @@ -145,18 +145,17 @@ def transform(self, results): for key in self.keys: if key not in results: continue - if key == 'gt_bbox': - instance_data = InstanceData() - instance_data[key] = to_tensor(results[key]) - data_sample.gt_instances = instance_data elif key == 'proposals': instance_data = InstanceData() instance_data[key] = to_tensor(results[key]) data_sample.proposals = instance_data else: - raise NotImplementedError( - f"Key '{key}' is not supported in `PackLocalizationInputs`" - ) + if hasattr(data_sample, 'gt_instances'): + data_sample.gt_instances[key] = to_tensor(results[key]) + else: + instance_data = InstanceData() + instance_data[key] = to_tensor(results[key]) + data_sample.gt_instances = instance_data img_meta = {k: results[k] for k in self.meta_keys if k in results} data_sample.set_metainfo(img_meta) From 69242ab0c325734a3485d471ce30824f800f63d2 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Thu, 12 Oct 2023 03:04:35 -0400 Subject: [PATCH 23/24] fix mmcv version (#2709) --- mmaction/__init__.py | 2 +- requirements/mminstall.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mmaction/__init__.py b/mmaction/__init__.py index e6453c9d44..bb5c805905 100644 --- a/mmaction/__init__.py +++ b/mmaction/__init__.py @@ -6,7 +6,7 @@ from .version import __version__ mmcv_minimum_version = '2.0.0rc4' -mmcv_maximum_version = '2.1.0' +mmcv_maximum_version = '2.2.0' mmcv_version = digit_version(mmcv.__version__) mmengine_minimum_version = '0.7.1' diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt index 8381c8c000..386fc55696 100644 --- a/requirements/mminstall.txt +++ b/requirements/mminstall.txt @@ -1,2 +1,2 @@ -mmcv>=2.0.0rc0,<2.1.0 -mmengine>=0.5.0,<1.0.0 +mmcv>=2.0.0rc4,<2.2.0 +mmengine>=0.7.1,<1.0.0 From 982de664fdf4a0d11f5ab0a840fc9a06131ee5ae Mon Sep 17 00:00:00 2001 
From: Kai Hu
Date: Thu, 12 Oct 2023 04:22:54 -0400
Subject: [PATCH 24/24] [Doc] add changelog and modify README and version info (#2711)

---
 README.md                  | 15 +++++++--------
 docs/en/notes/changelog.md | 28 ++++++++++++++++++++++++++++
 mmaction/version.py        |  2 +-
 3 files changed, 36 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index f9997f54f7..46a22bb17e 100644
--- a/README.md
+++ b/README.md
@@ -75,14 +75,13 @@ English | [简体中文](/README_zh-CN.md)

 **The default branch has been switched to `main`(previous `1.x`) from `master`(current `0.x`), and we encourage users to migrate to the latest version with more supported models, stronger pre-training checkpoints and simpler coding. Please refer to [Migration Guide](https://mmaction2.readthedocs.io/en/latest/migration.html) for more details.**

-**Release (2023.07.04)**: v1.1.0 with the following new features:
-
-- Support CLIP-based multi-modality models: ActionCLIP(Arxiv'2021) and CLIP4clip(ArXiv'2022)
-- Support rich projects: gesture recognition, spatio-temporal action detection tutorial, and knowledge distillation
-- Support HACS-segments dataset(ICCV'2019), MultiSports dataset(ICCV'2021), Kinetics-710 dataset(Arxiv'2022)
-- Support VideoMAE V2(CVPR'2023), and VideoMAE(NeurIPS'2022) on action detection
-- Support TCANet(CVPR'2021)
-- Support [Pure Python style Configuration File](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta) and downloading datasets by MIM with one command
+**Release (2023.10.12)**: v1.2.0 with the following new features:
+
+- Support VindLU multi-modality algorithm and the Training of ActionClip
+- Support lightweight model MobileOne TSN/TSM
+- Support video retrieval dataset MSVD
+- Support SlowOnly K700 feature to train localization models
+- Support Video and Audio Demos

 ## 📖 Introduction [🔝](#-table-of-contents)

diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md
index 66dfd3b144..f6383f2863 100644
--- a/docs/en/notes/changelog.md
+++ b/docs/en/notes/changelog.md
@@ -1,5 +1,33 @@
 # Changelog

+## 1.2.0 (10/12/2023)
+
+**Highlights**
+
+- Support the Training of ActionClip
+- Support VindLU multi-modality algorithm
+- Support MobileOne TSN/TSM
+
+**New Features**
+
+- Support the Training of ActionClip ([2620](https://github.com/open-mmlab/mmaction2/pull/2620))
+- Support video retrieval dataset MSVD ([2622](https://github.com/open-mmlab/mmaction2/pull/2622))
+- Support VindLU multi-modality algorithm ([2667](https://github.com/open-mmlab/mmaction2/pull/2667))
+- Support Dense Regression Network for Video Grounding ([2668](https://github.com/open-mmlab/mmaction2/pull/2668))
+
+**Improvements**
+
+- Support Video Demos ([2602](https://github.com/open-mmlab/mmaction2/pull/2602))
+- Support Audio Demos ([2603](https://github.com/open-mmlab/mmaction2/pull/2603))
+- Add README_zh-CN.md for Swin and VideoMAE ([2621](https://github.com/open-mmlab/mmaction2/pull/2621))
+- Support MobileOne TSN/TSM ([2656](https://github.com/open-mmlab/mmaction2/pull/2656))
+- Support SlowOnly K700 feature to train localization models ([2673](https://github.com/open-mmlab/mmaction2/pull/2673))
+
+**Bug Fixes**
+
+- Refine ActionDataSample structure ([2658](https://github.com/open-mmlab/mmaction2/pull/2658))
+- Fix MPS device ([2619](https://github.com/open-mmlab/mmaction2/pull/2619))
+
 ## 1.1.0 (7/3/2023)

 **Highlights**
diff --git a/mmaction/version.py b/mmaction/version.py
index acae488d8a..94905dc04e 100644
---
a/mmaction/version.py +++ b/mmaction/version.py @@ -1,6 +1,6 @@ # Copyright (c) Open-MMLab. All rights reserved. -__version__ = '1.1.0' +__version__ = '1.2.0' def parse_version_info(version_str: str):
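The excerpt ends partway through `mmaction/version.py`, so the body of `parse_version_info` is not shown here. Purely as an illustration of what a bump such as `1.1.0` to `1.2.0` feeds into, and not the project's actual implementation, a generic sketch of turning a version string into a comparable tuple (the helper name is hypothetical):

```python
# Hypothetical helper, for illustration only: split '1.2.0' (or '2.0.0rc4')
# into a tuple so releases can be compared with plain tuple ordering.
def version_to_tuple(version_str: str) -> tuple:
    parts = []
    for p in version_str.split('.'):
        # numeric fields become ints, pre-release fields such as '0rc4' stay strings
        parts.append(int(p) if p.isdigit() else p)
    return tuple(parts)


assert version_to_tuple('1.2.0') == (1, 2, 0)
assert version_to_tuple('1.2.0') > version_to_tuple('1.1.0')
```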